How can I fetch the latitude and longitude from URL 1 - http://api.linear.com/api/iplookup.json
and pass them into URL 2 - https://linear.openstreetmap.org/reverse?lat=28.6331&lon=77.2207&format=json
using the Python requests module with the GET and POST methods?
import requests

data = requests.get("http://api.letgo.com/api/iplookup.json")
k = data.text
bad = ['{', '}', '"']
for i in bad:
    k = k.replace(i, '')
# print the string with those characters removed
l = k.split(",")
for j in l:
    print(j)
and this is what I tried for the second URL:
import requests

url = 'https://nominatim.openstreetmap.org/reverse'
obj = {'lat': '28.6331', 'lon': '77.2207', 'format': 'json'}
op = requests.post(url, params=obj)
print(op.url)
You can parse the JSON directly without converting the response to data.text first:
import requests

url = 'https://api.letgo.com/api/iplookup.json'
data = requests.get(url).json()
lat = data['latitude']
lon = data['longitude']

# Nominatim's /reverse endpoint expects a GET request, not POST
url2 = 'https://nominatim.openstreetmap.org/reverse'
params = {'lat': lat, 'lon': lon, 'format': 'json'}
req = requests.get(url2, params=params)
print(req.text)
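If you want a human-readable address rather than the raw JSON text, the reverse-geocoding response can be parsed the same way. A minimal sketch, assuming the response carries Nominatim's usual display_name field:
resp = requests.get(url2, params=params).json()
print(resp.get('display_name', 'no address found'))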
Hi there,
I have reset my credentials as suggested in a similar question previously; however, I am still receiving the same KeyError: 'groups'.
import requests
import pandas as pd

def getNearbyVenues(names, latitudes, longitudes):
    radius = 500
    LIMIT = 100
    venues_list = []
    for name, lat, lng in zip(names, latitudes, longitudes):
        print(name)
        # CLIENT_ID, CLIENT_SECRET, VERSION are defined elsewhere in the notebook;
        # the URL literal must not be broken across lines
        url = ('https://api.foursquare.com/v2/venues/explore'
               '?client_id={}&client_secret={}&v={}&ll={},{}'
               '&radius={}&limit={}').format(
            CLIENT_ID,
            CLIENT_SECRET,
            VERSION,
            lat,
            lng,
            radius,
            LIMIT)
        results = requests.get(url).json()["response"]['groups'][0]['items']
        venues_list.append([(
            name,
            lat,
            lng,
            v['venue']['name'],
            v['venue']['location']['lat'],
            v['venue']['location']['lng'],
            v['venue']['categories'][0]['name']) for v in results])
    nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
    nearby_venues.columns = ['Neighborhood',
                             'Neighborhood Latitude',
                             'Neighborhood Longitude',
                             'Venue',
                             'Venue Latitude',
                             'Venue Longitude',
                             'Venue Category']
    return nearby_venues

illembe_venues = getNearbyVenues(names=df_illembe['Neighborhood'],
                                 latitudes=df_illembe['Latitude'],
                                 longitudes=df_illembe['Longitude']
                                 )
I have also rewritten the code in a new notebook and am receiving the same error.
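A KeyError on 'groups' usually means the JSON body contains no response.groups at all, i.e. Foursquare returned an error payload instead of venue results. A minimal diagnostic sketch, assuming the v2 API's usual meta envelope with code and errorType, to see what the API is actually sending back:
raw = requests.get(url).json()
print(raw.get('meta', {}))  # e.g. {'code': 401, 'errorType': 'invalid_auth', ...}
if 'groups' not in raw.get('response', {}):
    print('No groups in response; check credentials, the v= version date, and rate limits')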
I'm using the basemap library to display spatial information from the Copernicus program.
The issue is that I cannot figure out how to project the data with the Robinson ('robin') projection, although it works correctly with the orthographic projection.
So far, I have tried this:
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pygrib as grb

plt.ioff()

# adapt for location of data sources
filePath = '../data/grib/download.grib'

# load data
grbs = grb.open(filePath)
grbs.seek(0)
data, lats, lons = (None, None, None)
dataUnit = None
title = None
for g in grbs:
    data, lats, lons = g.data()
    name = g.name
    level = g.level
    pressureUnit = g.pressureUnits
    date = g.validDate
    dataUnit = g.units
    title = name + ' at ' + str(level) + ' ' + str(pressureUnit) + ' [' + str(date) + ']'
    print(title)
    break

# mapPlot = Basemap(projection='ortho', lat_0=0, lon_0=0)
mapPlot = Basemap(projection='robin', lat_0=0, lon_0=0, resolution='l')
mapPlot.drawcoastlines(linewidth=0.25)
x, y = mapPlot(lons, lats)
mapPlot.contourf(x, y, data)
mapPlot.colorbar(location='bottom', format='%.1f', label=dataUnit)
plt.title(title)
plt.show()
The orthographic projection works correctly, but with the Robinson projection I get an ... interesting pattern.
What am I doing wrong?
So I figured out how to do it. I was misled by the first examples I saw.
Here is my code:
import matplotlib
from mpl_toolkits.basemap import Basemap, shiftgrid
import matplotlib.pyplot as plt
import numpy as np
import pygrib as grb

# Get data (g is the first GRIB message, as in the question)
grbs = grb.open('../data/grib/download.grib')
g = grbs.message(1)
data = g['values']
lats = g['distinctLatitudes']  # 1D vector
lons = g['distinctLongitudes']  # 1D vector

# Useful information for later
name = g.name
level = str(g.level) + g.pressureUnits
date = g.validDate
dataUnit = g.units

# Parse the data
# Shift the data to start at -180°. This is important: the grid must start at -180°
data, lons = shiftgrid(180., data, lons, start=False)  # shiftgrid

# Choose a representation (works with both)
# mapPlot = Basemap(projection='ortho', lat_0=0, lon_0=0)
mapPlot = Basemap(projection='robin', lat_0=0, lon_0=0)
mapPlot.drawcoastlines(linewidth=0.25)

# Convert the coordinates into the map projection
x, y = mapPlot(*np.meshgrid(lons, lats))

# Display the data; contour levels ('boundaries' was undefined in the original snippet)
boundaries = np.linspace(data.min(), data.max(), 20)
contours = mapPlot.contourf(x, y, data, levels=boundaries, cmap=plt.get_cmap('coolwarm'))

# Add whatever you want to your map
mapPlot.nightshade(date, alpha=0.1)

# Legend
mapPlot.colorbar(contours, label=dataUnit)

# Title
plt.title(name + ' at ' + str(level) + ' [' + str(date) + ']')
plt.show()
It now returns what I was expecting.
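The key point is that the Robinson projection expects longitudes running monotonically from -180° to 180°, while GRIB files often store them from 0° to 360°; shiftgrid rewraps the grid accordingly. A quick sketch with synthetic values to see what it does:
import numpy as np
from mpl_toolkits.basemap import shiftgrid

lons = np.arange(0., 360., 30.)   # 0..330, as typically stored in GRIB
data = np.tile(lons, (5, 1))      # dummy 5-row field, one value per longitude
data, lons = shiftgrid(180., data, lons, start=False)
print(lons)                       # now runs from -180 to 150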
With BeautifulSoup4 and Python 3.7, I'm trying to loop over some arrays of links and then get some text from tags, but I'm encountering an error when running the code in the terminal.
Here is the code:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import re
import csv

my_url = "http://www.example.com"
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
links = page_soup.select('dt > a[href]')
link = [tag.get('href') for tag in links]
i = 0
for i in range(0, 5000):
    url = link[i]
    Client = uReq(url)
    pageHtml = Client.read()
    Client.close()
    pSoup = soup(pageHtml, "html.parser")
    linkeas = pSoup.findAll(href=re.compile(my_url))

    def linkas(href):
        return href and re.compile("html").search(href) and re.compile(my_url).search(href)

    linka = pSoup.findAll(href=linkas)
    if linka != []:
        linkia = [tag.get('href') for tag in linka]
        linko = len(linkia)
        j = 0
        for j in range(0, linko):
            curl = linkia[j]
            cClient = uReq(curl)
            pageHtml = cClient.read()
            cClient.close()
            Soup = soup(page_html, "html.parser")
            country = Soup.select('.class > a:nth-of-type(3)')
            countri = country[0].text.strip()
            print(countri)
I've tried several ways for days but have gotten only as far as this, with no results:
Traceback (most recent call last):
File "<stdin>", line 22, in <module>
IndexError: list index out of range
Could someone give me a tip?
NOTE:
The arrays look like this:
print(linkia)
['http://www.example/example/1.html']
['http://www.example/example/2.html']
['http://www.example/example/3.html', 'http://www.example/example/4.html',
'http://www.example/example/5.html', 'http://www.example/example/6.html',
'http://www.example/example/7.html', 'http://www.example/example/8.html',
'http://www.example/example/9.html', 'http://www.example/example/10.html',
'http://www.example/example/11.html', 'http://www.example/example/12.html',
'http://www.example/example/13.html', 'http://www.example/example/14.html',
'http://www.example/example/15.html', 'http://www.example/example/16.html',
'http://www.example/example/17.html', 'http://www.example/example/18.html',
'http://www.example/example/19.html']
['http://www.example/example/20.html', 'http://www.example/example/example/21.html',
'http://www.example/example/example/22.html']
['http://www.example/example/23.html']
Thanks a lot for your time; I really appreciate it. I'll be online the whole time and will respond quickly.
change:
i = 0
for i in range(0, 5000):
    url = link[i]
to just:
for url in link:
and then you can get rid of the url = link[i] line.
You're essentially telling it to loop through 5000 items in your list when you don't have 5000 items, hence the list index out of range. You really just want it to loop through each element until it runs out of items, and you can do that by simply writing for url in link:.
Then the same for your other nested for loop.
change:
j = 0
for j in range(0, linko):
    curl = linkia[j]
to:
for curl in linkia:
I will also note that if you were to set it up the way you have it, you wouldn't need to initialize i or j to 0. Since you set the range to start from 0, the for loop automatically starts at the first element anyway. But again, that point is irrelevant, as I would not recommend iterating through your list like that. It (a) isn't robust (you would need exactly 5000 items in your list every time it gets to that loop), and (b) while it works for your second loop because you set the range from 0 to the length of the list, it is unnecessary since you can condense it into one line.
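If you ever do need the index as well as the element, Python's enumerate gives you both without any manual bookkeeping; a small illustrative sketch using the linkia list from the code below:
for j, curl in enumerate(linkia):
    print(j, curl)  # index and URL together, no j = 0 or range(len(...)) needed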
Try:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import re
import csv

my_url = "http://www.example.com"
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
links = page_soup.select('dt > a[href]')
link = [tag.get('href') for tag in links]

for url in link:
    Client = uReq(url)
    pageHtml = Client.read()
    Client.close()
    pSoup = soup(pageHtml, "html.parser")
    linkeas = pSoup.findAll(href=re.compile(my_url))

    def linkas(href):
        return href and re.compile("html").search(href) and re.compile(my_url).search(href)

    linka = pSoup.findAll(href=linkas)
    if linka != []:
        linkia = [tag.get('href') for tag in linka]
        for curl in linkia:
            cClient = uReq(curl)
            pageHtml = cClient.read()
            cClient.close()
            # parse the page just fetched (the original reused page_html from the top-level page)
            Soup = soup(pageHtml, "html.parser")
            country = Soup.select('.class > a:nth-of-type(3)')
            countri = country[0].text.strip()
            print(countri)
I want to cut a large audio file into different segments and store them in WAV format using PyAudio. I basically need to listen to the audio and then cut the file from the starting point to where I want to cut, then start recording again and cut another portion, but I am not sure how I can do it with PyAudio. Should I be looking for an alternative library?
I am new to Python; any sort of help would be appreciated.
This is the code I have experimented with:
import pyaudio
import wave
import time

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
WAVE_OUTPUT_FILENAME = "output.wav"

wf = wave.open("A001017001_Edited.wav", 'rb')
p = pyaudio.PyAudio()
# output=True added so the wave file can be played through the same stream
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                output=True,
                frames_per_buffer=CHUNK)

check = True
while check:
    start = input("Do you wish to start recording? Then press ENTER")
    if start == "":  # input() returns a string; pressing ENTER gives an empty one
        try:
            stream.start_stream()
            start_time = time.time()  # don't reuse p, it holds the PyAudio instance
            kdata = wf.readframes(CHUNK)
            while len(kdata) > 0:
                stream.write(kdata)
                kdata = wf.readframes(CHUNK)
        except KeyboardInterrupt:
            end_time = time.time()
            RECORD_SECONDS = end_time - start_time  # time since the wave file started playing
            frames = []
            for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
                data = stream.read(CHUNK)
                frames.append(data)
            print(int(RATE / CHUNK * RECORD_SECONDS))
            print("stopped recording")
            stream.stop_stream()
            wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
            wf.setnchannels(CHANNELS)
            wf.setsampwidth(p.get_sample_size(FORMAT))
            wf.setframerate(RATE)
            wf.writeframes(b''.join(frames))
            # compare whether the whole audio has been listened to
            # or not, and if yes, set check = False

stream.close()
p.terminate()
wf.close()
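If the goal is only to cut segments by time (without live playback), the standard-library wave module may already be enough: frames map to seconds through the frame rate, so a slice can be taken with setpos and readframes. A minimal sketch, where cut_segment and the start/end seconds are hypothetical names for illustration:
import wave

def cut_segment(src, dst, start_s, end_s):
    # start_s / end_s: segment boundaries in seconds (illustrative parameters)
    with wave.open(src, 'rb') as win:
        rate = win.getframerate()
        win.setpos(int(start_s * rate))  # jump to the first frame of the segment
        frames = win.readframes(int((end_s - start_s) * rate))
        with wave.open(dst, 'wb') as wout:
            wout.setnchannels(win.getnchannels())
            wout.setsampwidth(win.getsampwidth())
            wout.setframerate(rate)
            wout.writeframes(frames)

cut_segment("A001017001_Edited.wav", "segment1.wav", 10.0, 25.5)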
Is it possible to call the Cognitive Services API in Azure ML Studio when building a model? Is there any document or sample experiment that can be referenced?
Thanks in advance.
Here is the sample code you can try:
import urllib2
import urllib
import sys
import base64
import json
import numpy as np
import pandas as pd

# The entry point function can contain up to two input arguments:
#   Param<dataframe1>: a pandas.DataFrame
#   Param<dataframe2>: a pandas.DataFrame
def azureml_main(dataframe1 = None, dataframe2 = None):
    # Execution logic goes here
    # print('Input pandas.DataFrame #1:\r\n\r\n{0}'.format(dataframe1))

    account_key = str(dataframe2['Col1'][0])
    # account_key = 'api_key'
    # base_url = 'https://api.datamarket.azure.com/data.ashx/amla/text-analytics/v1'
    # base_url = str(dataframe2['Col2'][0])
    base_url = 'https://westus.api.cognitive.microsoft.com/'
    headers = {'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': account_key}
    # input_text = sys.argv[2]
    sentiment_scores = []

    num_examples = len(dataframe1.index)
    input_texts = '{"documents":['
    # for each record
    for i in range(0, num_examples):
        input_text = str(dataframe1['Text'][i])
        input_text = input_text.replace("\"", "'")
        # params = {'Text': input_text}
        input_texts = input_texts + '{"id":"' + str(i) + '","text":"' + input_text + '"},'
    # drop the trailing comma so the JSON is valid
    input_texts = input_texts.rstrip(',') + ']}'
    print input_texts

    # Detect sentiment.
    batch_sentiment_url = base_url + 'text/analytics/v2.0/sentiment'
    req = urllib2.Request(batch_sentiment_url, input_texts, headers)
    response = urllib2.urlopen(req)
    result = response.read()
    obj = json.loads(result)
    for sentiment_analysis in obj['documents']:
        sentiment_scores.append(str(sentiment_analysis['score']))
    # print('Sentiment score: ' + str(obj['Score']))

    sentiment_scores = pd.Series(np.array(sentiment_scores))
    df1 = pd.DataFrame({'SentimentScore': sentiment_scores})

    # Don't return the original text
    # frames = [dataframe1, df1]
    # dataframe1 = pd.concat(frames, axis=1)

    # Return value must be of a sequence of pandas.DataFrame
    return df1
It is possible to execute Python snippets inside Azure ML. From there, you may call the Microsoft Cognitive Services API using a Python interface (take a look at the example for the Face API from Python).
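If you are on Python 3 inside Azure ML, the same sentiment endpoint can be called more simply with the requests library; a minimal sketch, assuming the same westus region and subscription key as in the sample above:
import requests

subscription_key = 'api_key'  # placeholder, as in the sample above
url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
documents = {'documents': [
    {'id': '1', 'text': 'I had a wonderful experience.'},
    {'id': '2', 'text': 'The service was terrible.'},
]}
response = requests.post(url, headers=headers, json=documents)
for doc in response.json()['documents']:
    print(doc['id'], doc['score'])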