I am trying to convert a GeoJSON file of London local authorities into a hex cartogram where each hexagon represents one local authority. It works in R, but when I try to export the generated hex grid as GeoJSON or TopoJSON I get the following error:
Error in sp::SpatialPolygonsDataFrame(polys, data = input@data) :
  row.names of data and Polygons IDs do not match
Here's the code. I am using geogrid to generate the grid and geojsonio to export the generated data frame to GeoJSON or TopoJSON:
library(geogrid)
library(geojsonio) # version 0.9.0
df <- read_polygons(system.file("extdata", "london_LA.json", package = "geogrid"))
# you can get the json file from here: https://github.com/jbaileyh/geogrid/blob/master/inst/extdata/london_LA.json
# Set arguments for plot
par(mfrow = c(2, 3), mar = c(0, 0, 2, 0))
# Hexagonal grid with 6 seeds
for (i in 1:3) {
  grid_hexagon <- calculate_grid(shape = df, learning_rate = 0.05, grid_type = "hexagonal", seed = i)
  plot(grid_hexagon, main = paste("Seed", i, sep = " "))
}
# Square grid
for (i in 1:3) {
  grid_square <- calculate_grid(shape = df, grid_type = "regular", seed = i)
  sp::plot(grid_square, main = paste("Seed", i, sep = " "))
}
# Get a SpatialDataFrame from our desired grid
tmp <- calculate_grid(shape = df, grid_type = "hexagonal", seed = 3)
df_hex <- assign_polygons(df, tmp)
# And export to TopoJSON
topojson_write(df_hex, object_name = "local_authorities", file = "output/london_hex.json")
Any suggestions on how I can solve this issue? I am also interested in hearing about other approaches to generating hex cartograms given a specific input file.
References: https://github.com/jbaileyh/geogrid
You can convert the SpatialPolygonsDataFrame to sf, and then write to a GeoJSON file with st_write:
library(sf)
df_hex = st_as_sf(df_hex)
st_write(df_hex, "df_hex.geojson")
Here is the result in QGIS: [screenshot of the hex grid rendered in QGIS]
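If you specifically need TopoJSON rather than GeoJSON, the original topojson_write call should also work once the object is converted to sf, since geojsonio accepts sf objects (a hedged sketch, reusing the object_name and file from the question):
library(sf)
library(geojsonio)
# converting to sf first avoids the sp row.names mismatch
df_hex_sf <- st_as_sf(df_hex)
topojson_write(df_hex_sf, object_name = "local_authorities", file = "output/london_hex.json")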
I was trying to download a Sentinel-2 image using sen2r() from the command line, as shown in the vignettes, with the following code:
library(tidyverse)
library(sen2r)
library(sf)
# Download administrative boundaries from the DIVA-GIS website
get_india_map <- function(cong = 113) {  # note: `cong` is never actually used below
  tmp_file <- tempfile()
  tmp_dir <- tempdir()
  zp <- "http://biogeo.ucdavis.edu/data/diva/adm/IND_adm.zip"
  download.file(zp, tmp_file)
  unzip(zipfile = tmp_file, exdir = tmp_dir)
  st_read(tmp_dir, layer = "IND_adm2")
}
ind <- get_india_map(114)
# view the attributes and the first 3 features of the data
ind[1:3,]
#Selecting specific district
Delhi <- ind %>%
  filter(NAME_1 == "Delhi") %>%
  mutate(DISTRICT = as.character(NAME_2)) %>%
  select(DISTRICT)
plot(Delhi)
# Set paths
out_dir_1 <- tempfile(pattern = "sen2r_out_1_") # output folder
safe_dir <- tempfile(pattern = "sen2r_safe_") # folder to store downloaded SAFE
out_paths_1 <- sen2r(
  gui = FALSE,
  step_atmcorr = "l2a",
  extent = Delhi,
  timewindow = c(as.Date("2020-11-13"), as.Date("2020-11-25")),
  list_prods = c("BOA", "SCL"),
  list_indices = c("NDVI", "MSAVI2"),
  list_rgb = c("RGB432B"),
  mask_type = "cloud_and_shadow",
  max_mask = 10,
  path_l2a = safe_dir,
  path_out = out_dir_1
)
But it returns the following error:
Error in readRDS(s2tiles_rds) : unknown input format
How can I solve this issue?
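One hedged first step (an assumption on my part, not something confirmed for sen2r specifically): readRDS() throws "unknown input format" when the .rds file on disk is corrupted, e.g. by an interrupted download, or was written in an incompatible serialization format, so refreshing the package and its bundled data files is worth trying:
# the default .rds serialization format changed in R 3.6.0, so an older R
# reading a newer file can fail this way; reinstalling refreshes sen2r's files
R.version.string
remove.packages("sen2r")
install.packages("sen2r")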
I need to use the functions detrend() and chron() from the dplR package on more than 300 tree-ring width datasets (.rwl files) of differing lengths. Rather than copying and pasting the code for each object, I would like to process them all at once. After some googling, it looks like I need a for loop, but I have not had much luck troubleshooting one. Could someone point me in the right direction? Below is my current code.
## read data files in
library(dplR)   # read.rwl, detrend, chron
library(readr)  # write_excel_csv
or001 <- read.rwl("or001.rwl", format = "tucson")
or002 <- read.rwl("or002.rwl", format = "tucson")
or004 <- read.rwl("or004.rwl", format = "tucson")
# detrend - negex method
or001.negex <- detrend(or001, nyrs = NULL, method = "ModNegExp", f = 0.5,
                       pos.slope = FALSE)
or002.negex <- detrend(or002, nyrs = NULL, method = "ModNegExp", f = 0.5,
                       pos.slope = FALSE)
or004.negex <- detrend(or004, nyrs = NULL, method = "ModNegExp", f = 0.5,
                       pos.slope = FALSE)
# build final chronology
or001.negex.crn <- chron(or001.negex, prefix = 'OR')
or002.negex.crn <- chron(or002.negex, prefix = 'OR')
or004.negex.crn <- chron(or004.negex, prefix = 'OR')
# export final chronologies
write_excel_csv(or001.negex.crn, path = "or001.negex.crn.csv")
write_excel_csv(or002.negex.crn, path = "or002.negex.crn.csv")
write_excel_csv(or004.negex.crn, path = "or004.negex.crn.csv")
Consider reading the datasets into a list and applying the same steps to each one with a function ('f1'):
f1 <- function(file, filenm) {
  dat <- read.rwl(file, format = "tucson")
  negex <- detrend(dat, nyrs = NULL, method = "ModNegExp", f = 0.5,
                   pos.slope = FALSE)
  negex.crn <- chron(negex, prefix = 'OR')
  write_excel_csv(negex.crn, path = filenm)
  return(negex.crn)
}
# get all the files with the `.rwl` pattern from the current working directory
files <- list.files(pattern = "\\.rwl$", full.names = TRUE)
# build the output file names by replacing the suffix with .negex.crn.csv
nm1 <- sub("\\.rwl$", ".negex.crn.csv", basename(files))
# loop over the files and apply the function
Map(f1, file = files, filenm = nm1)
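Map() also returns the chronologies as a list (named by the file paths), so you can capture them for further use in the session in addition to the CSVs that f1 writes:
res <- Map(f1, file = files, filenm = nm1)
str(res[[1]])  # inspect the first chronology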
Working in R, I am having difficulty building a JSON file that I would use in an API call.
The required format for the JSON file can be seen here:
https://developer.trimblemaps.com/restful-apis/routing/route-reports/post-route-reports/
The input to the exercise is a dataframe like so:
Shipper_Latitude <- c(1,2,3,4)
Shipper_Longitude <- c(1,2,3,4)
r_combine.NewShipperLat <- c(1,2,3,4)
r_combine.NewShipperLon <- c(1,2,3,4)
r4 <- data.frame(Shipper_Latitude,Shipper_Longitude,r_combine.NewShipperLat,r_combine.NewShipperLon)
My attempt at building the required JSON file is as follows:
# assemble lat and long for starting location:
tempfuna <- function(Lat, Lon) { list(Coords = list(Lat = Lat, Lon = Lon)) }
df_jsona <- mapply(FUN = tempfuna, Lat = r4$Shipper_Latitude, Lon = r4$Shipper_Longitude)
df_jsona <- lapply(df_jsona, function(x) { list(Coords = x) })
# assemble lat and long for ending location:
tempfunb <- function(Lat, Lon) { list(Coords = list(Lat = Lat, Lon = Lon)) }
df_jsonb <- mapply(FUN = tempfunb, Lat = r4$r_combine.NewShipperLat, Lon = r4$r_combine.NewShipperLon)
df_jsonb <- lapply(df_jsonb, function(x) { list(Coords = x) })
# assemble list of ReportRoutes:
tempfunc <- function(A, B) { list(ReportRoutes = list(Stops = list(A, B))) }
df_jsonc <- mapply(FUN = tempfunc, A = df_jsona, B = df_jsonb)
# create final list:
post_body <- list(ReportRoutes = df_jsonc)
I get an error when I use the resulting file in an API call.
I think the problem is that the list items in the ReportRoutes list are incorrectly named. For example, the first item is named “Coords.ReportRoutes” instead of [[1]].
How can I rework the above to produce the required JSON file?
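A minimal sketch of one way to get the nesting right, assuming the payload shape from the linked docs (ReportRoutes as a JSON array of objects, each holding a Stops array of Coords objects). The key is SIMPLIFY = FALSE: mapply's default simplification collapses the result into a named structure, which is where names like "Coords.ReportRoutes" come from:
library(jsonlite)
# hypothetical helper: one stop as a named list that serializes to {"Coords": {...}}
make_stop <- function(lat, lon) list(Coords = list(Lat = lat, Lon = lon))
# one ReportRoute per row of r4, each with a start and an end stop;
# SIMPLIFY = FALSE / USE.NAMES = FALSE keep an unnamed list, i.e. a JSON array
report_routes <- mapply(function(lat1, lon1, lat2, lon2) {
  list(Stops = list(make_stop(lat1, lon1), make_stop(lat2, lon2)))
}, r4$Shipper_Latitude, r4$Shipper_Longitude,
   r4$r_combine.NewShipperLat, r4$r_combine.NewShipperLon,
   SIMPLIFY = FALSE, USE.NAMES = FALSE)
post_body <- list(ReportRoutes = report_routes)
# inspect the JSON before posting; auto_unbox writes scalars instead of 1-element arrays
toJSON(post_body, auto_unbox = TRUE, pretty = TRUE)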
I am trying to create a map of all school districts in each state. The code below works for all states, except in Florida I get this error:
Error in data.frame(..., check.names = FALSE) :
arguments imply differing number of rows: 67, 121
require(dplyr)
require(sf)
library(tmap)
require(lwgeom)
temp <- tempfile() ### create a temporary file to download zip file to
temp2 <- tempfile() ### create a temporary file to put unzipped files in
download.file("https://s3.amazonaws.com/data.edbuild.org/public/Processed+Data/SD+shapes/2018/shapefile_1718.zip", temp) # downloading the data into the tempfile
unzip(zipfile = temp, exdir = temp2) # unzipping the temp file and putting unzipped data in temp2
filename <- list.files(temp2, full.names = TRUE) # getting the filename of the downloaded data
shp_file <- filename %>%
  subset(grepl("\\.shp$", filename)) ## selecting only the .shp file to read in
state_shape <- sf::st_read(shp_file) %>% ## reading in the downloaded data
  dplyr::mutate(GEOID = as.character(GEOID),
                GEOID = stringr::str_pad(GEOID, width = 7, pad = "0")) %>%
  filter(State == "Florida")
url <- "https://s3.amazonaws.com/data.edbuild.org/public/Processed+Data/Master/2017/full_data_17_geo_exc.csv"
master <- read.csv(file = url, stringsAsFactors = FALSE) %>%
  dplyr::mutate(NCESID = as.character(NCESID),
                NCESID = stringr::str_pad(NCESID, width = 7, pad = "0"),
                year = "2017") %>%
  dplyr::select(-NAME, -State, -STATE_FIPS) ## removing variables that duplicate the shapes
state_shape <- state_shape %>%
  dplyr::left_join(master, by = c("GEOID" = "NCESID")) %>%
  select(GEOID, NAME, State, StPovRate)
shape.clean <- lwgeom::st_make_valid(state_shape) # making all geometries valid
povertyBlues <- c('#dff3fe', '#92DCF0', '#49B4D6', '#2586a5', '#19596d')
map <- tm_shape(shape.clean) +
  tm_fill("StPovRate", breaks = c(0, .1, .2, .3, .4, 1), title = "Student Poverty",
          palette = povertyBlues,
          legend.format = list(fun = function(x) paste0(formatC(x * 100, digits = 0, format = "f"), " %"))) +
  tm_shape(shape.clean) +
  tm_borders(lwd = .25, col = "#e9e9e9", alpha = 1) +
  tm_layout(inner.margins = c(.05, .25, .1, .05))
map ### view the map
Both the shape passed to tm_shape and state_shape have 67 rows. Does anyone know what could be causing the "arguments imply differing number of rows: 67, 121" error?
Thanks!!
I was not able to get these shapes to print using tmap, but I was able to manually remove the problem points and lines so that they don't trigger errors in tmap. Both Florida and Nebraska had geometry collections in them, so I used the following script to remove any lines or points and change the geometry collections to multipolygons. I am sure there is a better way, and I would be happy to hear about more elegant solutions; this, at least, allows me to move on!
### Create an st_is function that works for many types
st_is <- function(x, type) UseMethod("st_is")
st_is.sf <- function(x, type)
  st_is(st_geometry(x), type)
st_is.sfc <- function(x, type)
  vapply(x, sf:::st_is.sfg, type, FUN.VALUE = logical(1))
st_is.sfg <- function(x, type)
  class(x)[2L] %in% type
####### Correct Florida #########
#### import my florida file
temp <- tempfile() ### create a temporary file to download zip file to
temp2 <- tempfile() ### create a temporary file to put unzipped files in
download.file("https://s3.amazonaws.com/data.edbuild.org/public/Processed+Data/SD+shapes/2018/shapefile_1718.zip", temp) # downloading the data into the tempfile
unzip(zipfile = temp, exdir = temp2) # unzipping the temp file and putting unzipped data in temp2
filename <- list.files(temp2, full.names = TRUE) # getting the filename of the downloaded data
shp_file <- filename %>%
  subset(grepl("\\.shp$", filename)) ## selecting only the .shp file to read in
florida <- sf::st_read(shp_file) %>% ## reading in the downloaded data
  filter(State == "Florida")
#### extract polygon shapes from any geometry collection
#### (hacky: after the loop ends, the loop variable `i` holds the last,
#### modified column of `florida`, which is the geometry column)
for (i in florida) {
  for (j in seq_along(i)) {
    if (inherits(i[[j]], "sfg")) { # only touch geometry entries, not attribute values
      if (st_is.sf(i[[j]], "GEOMETRYCOLLECTION")) {
        i[[j]] <- st_collection_extract(i[[j]], type = "POLYGON")
      }
    }
  }
}
florida_clean <- florida
st_geometry(florida_clean) <- NULL #### remove geometry from original florida
sfc_geo <- sf::st_sfc(i) #### wrap the cleaned column `i` in an sfc
florida_clean <- sf::st_set_geometry(florida_clean, sfc_geo) ### set florida's geometry to the cleaned column, with the points and lines removed
I just had a similar issue and managed to address it with the trick explained here
https://www.r-spatial.org/r/2017/03/19/invalid.html
namely applying a buffer of 0.0 to the shapes that are invalid
p[which(st_is_valid(p) == FALSE), ] <- st_buffer(p[which(st_is_valid(p) == FALSE), ], 0.0)
where p is the layer in question. I hope this helps.
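The same repair is now available directly as st_make_valid (shipped with sf 1.0+; on older setups it lives in lwgeom), which avoids the buffer trick:
# equivalent modern alternative to the zero-width buffer
p <- sf::st_make_valid(p)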
I had a similar issue relating to GEOMETRYCOLLECTIONs mixed in with other geometry types (in my case, MULTIPOLYGONs), producing the same error re: differing numbers of rows. By unpacking the GEOMETRYCOLLECTIONs to POLYGONs, and then casting to MULTIPOLYGON, I got a uniform spatial file comprising only MULTIPOLYGONs without dropping the features contained in the GEOMETRYCOLLECTIONs, and without the tmap error. Something akin to:
florida <- sf::st_read(shp_file) %>% ## reading in the downloaded data
  filter(State == "Florida") %>%
  st_collection_extract(type = "POLYGON") %>% ## unpacking into POLYGON-type geometries
  st_cast() ## type-casting (in this case, st_cast automatically casts to MULTIPOLYGONs)
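A quick sanity check after the cast, assuming the goal is a single uniform geometry type:
table(sf::st_geometry_type(florida)) # should now report only MULTIPOLYGON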
I am trying to use the specan function from the warbleR package. I want to pass my own wav file as an argument to the function. I have seen only one example in the docs, which is not very self-explanatory.
library(tuneR)   # readWave, mono
library(warbleR) # autodetec, specan
wave_file <- readWave("C:/Users/ABC/Downloads/file_example_WAV_1MG.wav",
                      from = 1, to = Inf, units = c("seconds"),
                      header = FALSE, toWaveMC = NULL)
head(wave_file)
mono_file <- mono(wave_file, which = c("both"))
head(mono_file)
auto_file <- autodetec(X = "C:/Users/ABC/Downloads/file_example_WAV_1MG.wav")
head(auto_file)
dataframe <- data.frame(list = c("sound.files", "selec", "start", "end"))
dataframe <- data.frame(wave_file, "abc", 1, Inf)
dataframe
# Existing Example found in R docs
#setwd('C:/Users/ABC/Downloads')
#data1 <- data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "selec.table"))
#writeWave(Phae.long1,"Phae.long1.wav")
#writeWave(Phae.long2,"Phae.long2.wav")
#writeWave(Phae.long3,"Phae.long3.wav")
#writeWave(Phae.long4,"Phae.long4.wav")
#writeWave(Phae.long1,"file_example_WAV_1MG.wav")
#writeWave(Phae.long2," ")
#writeWave(Phae.long3,"1")
#writeWave(Phae.long4,"Inf")
getwd()
#file <- specan(X = selec.table, bp = c(0, 22))
#head(file)
file <- specan(X = dataframe, bp = c(0,22))
How do I give my own .wav file as an argument to the specan function?
Instead of passing the actual wav file to the data frame, pass the name of that file. So your code should look like this:
dataframe <- data.frame(sound.files = "file_example_WAV_1MG.wav",
                        selec = 2, start = 1, end = 20)
a <- specan(X = dataframe, bp = c(0, 22))
You can then view a; the extracted features are stored in that data frame. Make sure your file is stored in the working directory.
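If specan still cannot find the file, confirm it is visible from the working directory before calling it:
getwd()
list.files(pattern = "\\.wav$") # "file_example_WAV_1MG.wav" should be listed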