Python ArcPy - Print Layer with highest field value - python-3.6

I have some Python code that goes through the layers in my ArcGIS project and prints each layer name and its corresponding highest value within the field "SUM_USER_VisitCount" (see the output screenshot). What I want the code to do is print only the layer name and SUM_USER_VisitCount value for the single layer with the absolute highest value (see the desired-output screenshot).
I have been unable to figure out how to achieve this and can't find anything online either. Can someone help me achieve my desired output?
Sorry if the code layout is a little weird; it got messed up when I pasted it into the "code sample".
Here is my code:
import arcpy
import datetime
from datetime import timedelta
import time

# Document start time in order to calculate run time
time1 = time.clock()

# Assign project and map frame
p = arcpy.mp.ArcGISProject(r'E:\arcGIS_Shared\Python\CumulativeHeatMaps.aprx')
m = p.listMaps('Map')[0]

Markets = [3000]
# Centers to loop through
CA_Centers = ['Castro', 'ColeValley', 'Excelsior', 'GlenPark',
              'LowerPacificHeights', 'Marina', 'NorthBeach', 'RedwoodCity',
              'SanBruno', 'DalyCity']

for Market in Markets:
    print(Market)
    for CA_Center in CA_Centers:
        Layers = m.listLayers("CumulativeSumWithin{0}_{1}_Jun2018".format(Market, CA_Center))
        fields = ['SUM_USER_VisitCount']
        for Layer in Layers:
            print(Layer)
            sqlClause = (None, 'ORDER BY SUM_USER_VisitCount')  # + 'DESC'
            with arcpy.da.SearchCursor(in_table=Layer, field_names=fields,
                                       sql_clause=sqlClause) as searchCursor:
                print(max(searchCursor))

You can create a dictionary that stores the result for each layer and then print out the one with the highest value at the end:
results_dict = {}
for Market in Markets:
    print(Market)
    for CA_Center in CA_Centers:
        Layers = m.listLayers("CumulativeSumWithin{0}_{1}_Jun2018".format(Market, CA_Center))
        fields = ['SUM_USER_VisitCount']
        for Layer in Layers:
            print(Layer)
            sqlClause = (None, 'ORDER BY SUM_USER_VisitCount')  # + 'DESC'
            with arcpy.da.SearchCursor(in_table=Layer, field_names=fields,
                                       sql_clause=sqlClause) as searchCursor:
                # The cursor is exhausted after one pass, so call max() only once
                highest = max(searchCursor)
                print(highest)
                results_dict[Layer] = highest

# Get the key of the dictionary item with the highest value
highest_count_layer = max(results_dict, key=results_dict.get)
print(highest_count_layer)
print(results_dict[highest_count_layer])
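For reference, max() with a key function iterates over the dictionary's keys but compares them by their looked-up values, so it returns the layer whose stored row is largest. A minimal standalone illustration of the idiom:
d = {'a': (3,), 'b': (10,), 'c': (7,)}
print(max(d, key=d.get))  # prints 'b', the key whose value is largest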

Related

Extract one band's data from a GeoTIFF file and plot a colored image based on that band's values on a folium map

I am new to extracting data from GeoTIFF files, so this question may sound obvious. I have a TIFF file that I got from an API; it has 12 bands. (Screenshots showing what each band contains, the result I want, and my current output were attached to the original post.) After extensive searching for days, the code below is as far as I have gotten.
The code I have tried:
from osgeo import gdal
import folium

def plot_band10():
    driver = gdal.GetDriverByName('GTiff')  # driver name is 'GTiff'
    driver.Register()
    ds = gdal.Open('...../286d7a11-4abd-449e-856b-79959dfd1396.tif')
    if ds is None:
        print('Could not open')
    geotransform = ds.GetGeoTransform()
    proj = ds.GetProjection()
    cols = ds.RasterXSize
    rows = ds.RasterYSize
    # Derive the raster's bounding box and centre from the geotransform
    xmin = geotransform[0]
    ymax = geotransform[3]
    xmax = xmin + cols * geotransform[1]
    ymin = ymax + rows * geotransform[5]
    centerx = (xmin + xmax) / 2
    centery = (ymin + ymax) / 2
    bands = ds.RasterCount
    band10 = ds.GetRasterBand(10)
    array = band10.ReadAsArray()
    map = folium.Map(location=[centery, centerx], zoom_start=7, tiles='openstreetmap')
    image = folium.raster_layers.ImageOverlay(
        array,  # the band array read above ('dataimage' was undefined)
        opacity=0.8, bounds=[[ymin, xmin], [ymax, xmax]]
    )
    image.add_to(map)
    map.fit_bounds(map.get_bounds(), padding=(10, 10))
    return map
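The question asks for a colored rendering of one band; here is a hedged sketch (my addition, assuming the band has been read into array and the bounds computed as above): normalize the values to [0, 1] and pass ImageOverlay a colormap callable, which folium applies to 2-D arrays:
import numpy as np
from matplotlib import cm

# Scale the band to [0, 1]; ImageOverlay applies the colormap callable
# to each normalized value of a 2-D array.
norm = (array - np.nanmin(array)) / (np.nanmax(array) - np.nanmin(array))
image = folium.raster_layers.ImageOverlay(
    norm,
    opacity=0.8,
    bounds=[[ymin, xmin], [ymax, xmax]],
    colormap=cm.viridis,  # maps each value in [0, 1] to an RGBA tuple
)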

Warp10 and streamlit integration?

Two simple questions:
Does Warp10 integrate with streamlit to feed visualisations?
If so, would you please specify how this can be accomplished?
Thanks in advance.
Best wishes,
There's no direct integration of Warp 10 in streamlit.
Although streamlit can handle many kinds of data, it is mainly focused on pandas DataFrames. DataFrames are tables, whereas Warp 10 Geo Time Series (GTS) are time series. So even if Warp 10 were integrated into streamlit, some code would still be needed to format the data properly for streamlit to reach its full potential (a sketch of such a conversion follows the example below).
That being said, here is a small example of how to display data stored in Warp 10 with streamlit:
import json
from datetime import datetime, timedelta

import requests
import streamlit as st
from bokeh.palettes import Category10_10 as palette
from bokeh.plotting import figure

# Should be put in a configuration file.
fetch_endpoint = 'http://localhost:8080/api/v0/fetch'
token = 'READ'  # Change that to your actual token

def load_data_as_json(selector, start, end):
    headers = {'X-Warp10-Token': token}
    params = {'selector': selector, 'start': start, 'end': end, 'format': 'json'}
    r = requests.get(fetch_endpoint, params=params, headers=headers)
    return r.text

st.title('Warp 10 Test')

# Input parameters
selector = st.text_input('Selector', value="~streamlit.*{}")
start_date = st.date_input('Start date', value=datetime.now() - timedelta(days=10))
start_time = st.time_input('Start time')
end_date = st.date_input('End date')
end_time = st.time_input('End time')

# Convert datetime.dates and datetime.times to microseconds (default time unit in Warp 10)
start = int(datetime.combine(start_date, start_time).timestamp()) * 1000000
end = int(datetime.combine(end_date, end_time).timestamp()) * 1000000

# Make the query to Warp 10 and get back a json.
json_data = load_data_as_json(selector, start, end)
gtss = json.loads(json_data)

# Iterate through the json and populate a Bokeh graph.
p = figure(title='GTSs', x_axis_label='time', y_axis_label='value')
for gts_index, gts in enumerate(gtss):
    tss = []
    vals = []
    for point in gts['v']:
        tss.append(point[0])
        vals.append(point[-1])
    p.line(x=tss, y=vals, legend_label=gts['c'] + json.dumps(gts['l']),
           color=palette[gts_index % len(palette)])
st.bokeh_chart(p, use_container_width=True)

# Also display the json.
st.json(json_data)
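As mentioned above, here is a short sketch of formatting the fetched GTSs for streamlit's DataFrame-oriented widgets (my assumption of a reasonable conversion, reusing the gtss structure from the example, where each GTS carries a class 'c', labels 'l', and points 'v' of the form [timestamp, ..., value]):
import pandas as pd

frames = []
for gts in gtss:
    # One row per data point, tagged with the series name.
    frames.append(pd.DataFrame({
        'series': gts['c'] + json.dumps(gts['l']),
        'timestamp': [point[0] for point in gts['v']],
        'value': [point[-1] for point in gts['v']],
    }))
if frames:
    st.dataframe(pd.concat(frames, ignore_index=True))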

How to import data from an HTML table on a website into Excel?

I would like to do some statistical analysis with Python on the live casino game called Crazy Time from Evolution Gaming. There is a website that has the data to do this: https://tracksino.com/crazytime. I want the data from the bottom table, 'Spin History', to be imported into Excel. However, I do not know how this can be done. Could anyone give me an idea where to start?
Thanks in advance!
Try the below code:
import json
import requests
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import csv
import datetime

def scrap_history():
    csv_headers = []
    file_path = ''  # mention where on your system you want to save the file
    file_name = 'spin_history.csv'  # filename
    page_number = 1
    while True:
        # Dynamic URL, fetching data in chunks of 100
        url = 'https://api.tracksino.com/crazytime_history?filter=&sort_by=&sort_desc=false&page_num=' + str(page_number) + '&per_page=100&period=24hours'
        print('-' * 100)
        print('URL created : ', url)
        response = requests.get(url, verify=False)
        result = json.loads(response.text)  # loading data to convert to JSON.
        history_data = result['data']
        print(history_data)
        if history_data != []:
            with open(file_path + file_name, 'a+') as history:
                # Headers for the file
                csv_headers = ['Occured At', 'Slot Result', 'Spin Result',
                               'Total Winners', 'Total Payout']
                csvwriter = csv.DictWriter(history, delimiter=',',
                                           lineterminator='\n', fieldnames=csv_headers)
                if page_number == 1:
                    print('Writing CSV header now...')
                    csvwriter.writeheader()
                # Write the extracted data into the csv file one row at a time
                for item in history_data:
                    value = datetime.datetime.fromtimestamp(item['when'])
                    occured_at = f'{value:%d-%B-%Y # %H:%M:%S}'
                    csvwriter.writerow({'Occured At': occured_at,
                                        'Slot Result': item['slot_result'],
                                        'Spin Result': item['result'],
                                        'Total Winners': item['total_winners'],
                                        'Total Payout': item['total_payout'],
                                        })
            print('-' * 100)
            page_number += 1
            print(page_number)
            print('-' * 100)
        else:
            break
Explanation:
I implemented the above script using the Python requests approach. The API URL https://api.tracksino.com/crazytime_history?filter=&sort_by=&sort_desc=false&page_num=1&per_page=50&period=24hours was extracted from the website itself. The script builds a dynamic URL in which the page number changes on each iteration: first page_num=1, then page_num=2, and so on, until all the data has been extracted. The resulting CSV file can then be opened directly in Excel.
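To run it, just call the function; the loop stops on the first iteration where the API returns an empty data list:
scrap_history()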

Plotting multiple states bokeh

I'm trying to plot two things:
Counties for MD, VA, and DC
Lats/longs for housing listings in that area (most expensive in a certain color and least expensive in another color)
However, I'm having trouble with part 1, and cannot seem to plot the counties without getting the error message:
"Javascript error adding output!
Error: Error rendering Bokeh model: could not find tag with id: xxxxxxxxxxx
See your browser Javascript console for more details."
where xxxxx are numbers
from bokeh.io import show
from bokeh.models import (
    ColumnDataSource,
    HoverTool,
    LogColorMapper)
from bokeh.palettes import Viridis6 as palette
from bokeh.plotting import figure
from bokeh.sampledata.us_counties import data as counties
from bokeh.sampledata.unemployment import data as unemployment

palette.reverse()

va_counties = {
    code: county for code, county in counties.items() if county["state"] == "va"
}
md_counties = {
    code: county for code, county in counties.items() if county["state"] == "md"
}
dc_counties = {
    code: county for code, county in counties.items() if county["state"] == "dc"
}

va_county_xs = [county["lons"] for county in va_counties.values()]
va_county_ys = [county["lats"] for county in va_counties.values()]
md_county_xs = [county["lons"] for county in md_counties.values()]
md_county_ys = [county["lats"] for county in md_counties.values()]
dc_county_xs = [county["lons"] for county in dc_counties.values()]
dc_county_ys = [county["lats"] for county in dc_counties.values()]

va_county_names = [county['name'] for county in va_counties.values()]
md_county_names = [county['name'] for county in md_counties.values()]
dc_county_names = [county['name'] for county in dc_counties.values()]
#county_rates = [unemployment[county_id] for county_id in counties]

color_mapper = LogColorMapper(palette=palette)

va_source = ColumnDataSource(data=dict(
    x=va_county_xs,
    y=va_county_ys,
    name=va_county_names,
))
md_source = ColumnDataSource(data=dict(
    x=md_county_xs,
    y=md_county_ys,
    name=md_county_names,
))
dc_source = ColumnDataSource(data=dict(
    x=dc_county_xs,
    y=dc_county_ys,
    name=dc_county_names,
))

TOOLS = "pan,wheel_zoom,reset,hover,save"

va = figure(
    title="Texas Unemployment, 2009", tools=TOOLS,
    x_axis_location=None, y_axis_location=None
)
va.grid.grid_line_color = None
md = figure(
    title="Texas Unemployment, 2009", tools=TOOLS,
    x_axis_location=None, y_axis_location=None
)
md.grid.grid_line_color = None
dc = figure(
    title="Texas Unemployment, 2009", tools=TOOLS,
    x_axis_location=None, y_axis_location=None
)
dc.grid.grid_line_color = None

va.patches('x', 'y', source=va_source,
           fill_color={'field': 'rate', 'transform': color_mapper},
           fill_alpha=0.7, line_color="white", line_width=0.5)
md.patches('x', 'y', source=md_source,
           fill_color={'field': 'rate', 'transform': color_mapper},
           fill_alpha=0.7, line_color="white", line_width=0.5)
dc.patches('x', 'y', source=dc_source,
           fill_color={'field': 'rate', 'transform': color_mapper},
           fill_alpha=0.7, line_color="white", line_width=0.5)

hover = p.select_one(HoverTool)
hover.point_policy = "follow_mouse"
hover.tooltips = [
    ("Name", "#name"),
    ("(Long, Lat)", "($x, $y)"),
]

show(va)
show(md)
show(dc)
Bokeh maintains an implicit "current document", and by default everything you create gets added to it. This makes it very simple to have a script that generates one HTML file, and it makes usage in the Jupyter notebook much simpler and more transparent. In general this is a net positive, but it does mean certain other usage patterns need to be slightly more verbose. In particular, in your case with multiple show calls, each call to show gets the same document, which is probably not what you intend. The solution is to create and show each plot in order, and call reset_output in between. Additionally, you need to specify a unique filename for each separate output by using output_file. Here is a small complete example:
from bokeh.io import output_file, reset_output, show
from bokeh.plotting import figure
# create and show one figure
p1 = figure(title="plot 1")
p1.circle([1,2,3], [4,6,5])
output_file("p1.html")
show(p1)
# clear out the "current document"
reset_output()
# create and show another figure
p2 = figure(title="plot 2")
p2.circle([1,2,3], [8,6,7])
output_file("p2.html")
show(p2)
There are other ways to do this by managing Bokeh Document objects explicitly yourself, but I would probably say this is the simplest to get started.
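For instance, a minimal sketch of that more explicit route (one option among several, not the only way): bokeh.embed.file_html renders a figure to a standalone HTML string, which you can then write to a uniquely named file yourself:
from bokeh.embed import file_html
from bokeh.plotting import figure
from bokeh.resources import CDN

p3 = figure(title="plot 3")
p3.circle([1, 2, 3], [2, 4, 3])

# Render the figure to a self-contained HTML string and write it out manually.
html = file_html(p3, CDN, "plot 3")
with open("p3.html", "w") as f:
    f.write(html)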

IndexError: list index out of range, scores.append( (fields[0], fields[1]))

I'm trying to read a file and put its contents in a list. I have done this many times before and it has worked, but this time it throws the error "list index out of range".
The code is:
with open("File.txt") as f:
scores = []
for line in f:
fields = line.split()
scores.append( (fields[0], fields[1]))
print(scores)
The text file is in the format:
Alpha:[0, 1]
Bravo:[0, 0]
Charlie:[60, 8, 901]
Foxtrot:[0]
I can't see why it is giving me this problem. Is it because I have more than one value for each item? Or is it the fact that I have a colon in my text file?
How can I get around this problem?
Thanks
The IndexError comes from lines with no whitespace in them: for example, 'Foxtrot:[0]' contains no spaces, so line.split() returns a one-element list and fields[1] is out of range. If I understand you correctly, this code will print your desired result:
import re

with open("File.txt") as f:
    # Build a dictionary for the scores: {name: scores}.
    scores = {}
    # Regular expressions to parse the team name and team scores from each line.
    patternScore = r'\[([^\]]+)\]'
    patternName = r'(.*):'
    for line in f:
        # Extract the team name and its scores.
        fields = re.search(patternScore, line).groups()[0].split(', ')
        name = re.search(patternName, line).groups()[0]
        # Update the dictionary with the new value.
        scores[name] = fields

# Print the output: first the first score of each entry, then the name.
for key in scores:
    print(scores[key][0] + ':' + key)
You will receive the following output:
60:Charlie
0:Alpha
0:Bravo
0:Foxtrot
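As an aside (an alternative sketch, not part of the original answer): since the bracketed part of each line is a valid Python list literal, str.partition plus ast.literal_eval avoids regular expressions entirely:
import ast

scores = {}
with open("File.txt") as f:
    for line in f:
        # 'Alpha:[0, 1]' -> name 'Alpha', rest '[0, 1]'
        name, _, rest = line.strip().partition(':')
        scores[name] = ast.literal_eval(rest)  # parse the list literal into real ints

for name, vals in scores.items():
    print('{0}:{1}'.format(vals[0], name))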
