I have tried several techniques to push a JSON object into an array and save it in the same format as the example below, but without success.
Does anyone have a solution for doing this in R?
Thank you.
EDIT:
I found the solution.
library(jsonlite)
# set up an empty list with two slots
list1 <- vector(mode = 'list', length = 2)
# data example
json_data <- list(object1 = list(birthday = '2000-02-14', Age = '20'),
                  object2 = list(Candidate_Number = '1999283', first_attempt = TRUE),
                  object3 = list(name = 'John E.', result = list(), study_hours = 150, GPA = 3.8,
                                 exam_infos = list(cost = 800, location = 'F3C6V9',
                                                   past_exams = list(list(exam_name = 'Science', score = 'passed'),
                                                                     list(exam_name = 'Geometric', score = 'passed')))),
                  object4 = list(study_manual_used = 'Physics Theory', version_found = list(Digital = '1999-01-01', Paper = '1999-01-01')))
# append the data to each slot of the list
for (i in seq_along(list1)) {
  list1[[i]] <- json_data
}
# write the JSON to the user's home directory
write(toJSON(list1, auto_unbox = TRUE, pretty = TRUE), file.path(Sys.getenv()['USERPROFILE'], 'file.json'))
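To confirm the structure survives the round trip, you can read the file back; a quick check with jsonlite's fromJSON() against the same path as above:
# read the file back without simplification and inspect the structure
check <- fromJSON(file.path(Sys.getenv()['USERPROFILE'], 'file.json'), simplifyVector = FALSE)
str(check, max.level = 2)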
To save an object as JSON you can use the package rjson.
library("rjson")
# example data
list1 <- list()
list1[[1]] <- c(created_at="1910-02-03", id="212", field="1")
list1[[2]] <- c(created_at="1910-01-02", id="218", field="3")
# to json
toJSON(list1)
and
write(toJSON(list1), "file.json")
If the issue is that you created a data.frame and need to turn it back into JSON, you have to convert it to a list by row, as follows:
# example data
dta <- as.data.frame(rbind(c(created_at="1910-02-03", id="212", field="1"), c(created_at="1910-01-02", id="218", field="3")))
# to json
toJSON(list(dta[1,],dta[2,]))
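For a data.frame with more rows, listing every index by hand gets tedious. A sketch of the same idea using base R's split() over the row indices (unname() keeps the list unnamed so it serializes as a JSON array rather than an object):
# one list element per row, then serialize as before
toJSON(unname(split(dta, seq_len(nrow(dta)))))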
I have created a working process below that performs baseline correction on the graph for a given data set, outlined below.
library(baseline) # provides baseline() and getCorrected()

setwd("C:/Users/o/OneDrive/Desktop")
importData = read.delim("OSJH103h.txt", header=F)
matrixData = as.matrix(importData)
swappedColRow = t(matrixData) # transpose: row 1 = x values, row 2 = signal
row.names(swappedColRow) = c(1,2)
removedColumn = swappedColRow[-c(1),] # drop the x values, keep the signal
matrixRemovedCol = as.matrix(removedColumn)
swappedMatrix = t(matrixRemovedCol) # back to one spectrum per row
bc.irls = baseline(swappedMatrix, lambda=2, hwi=100, it=10, int=2000, method = 'fillPeaks')
mf = getCorrected(bc.irls)
mf2d = data.frame(ys=mf[1,], xs=importData$V1)
par(mfrow=c(1,1))
plot(x=mf2d$xs, y=smooth(mf2d$ys), col=2, type="l")
How would I import multiple data files and loop through them, removing the baseline from each dataset?
I have outlined a method for importing all the .txt files in a given directory.
temp = list.files(pattern="*.txt")
myfiles = lapply(temp, read.delim, header=FALSE)
The files are imported as [[1]], [[2]], [[3]], and so on; thus replacing 'importData' with myfiles[[2]] yields the same result.
I am looking for a way to import ~10-15 data sets at a time and remove the baseline for each, then ideally export the corrected data to separate txt files.
I hope this makes sense. Any help would be appreciated.
Perhaps this:
library(baseline)
temp = list.files(pattern="*.txt")
reproc_base <- function(temp) {
  importData = lapply(temp, read.delim, header=FALSE)
  matrixData = lapply(importData, as.matrix)
  swappedColRow = lapply(matrixData, t)
  swappedColRow = lapply(swappedColRow, function(x) {rownames(x) <- c(1, 2); x})
  # drop the first row of each matrix via an anonymous function (cf. SOF 12664430)
  removedColumn = lapply(swappedColRow, function(x) x[-1, ])
  matrixRemovedCol = lapply(removedColumn, as.matrix)
  swappedMatrix = lapply(matrixRemovedCol, t)
  bc.irls = lapply(swappedMatrix, baseline, lambda=2, hwi=100, it=10, int=2000, method = 'fillPeaks')
  mf = lapply(bc.irls, getCorrected)
  return(mf)
}
# while debugging with debugonce(reproc_base), you'll probably want just 1 file
debugonce(reproc_base)
test_mf <- reproc_base(temp[1])
Well, as you can see, there are a couple of notations I'm not fully certain about, in particular the row-name assignment and the row removal, which use anonymous functions (SOF 12664430). Play with it under debugonce(reproc_base) or debug(reproc_base) and let's see where it breaks.
My solution, in case anyone is interested:
library(baseline)
library(stringr) # str_sub(), str_length()

temp = list.files(pattern="*.txt", full.names = T)
myfiles = lapply(temp, read.delim, header=FALSE)
for (i in 1:length(temp)){
  matrixData = as.matrix(myfiles[[i]])
  swappedColRow = t(matrixData)
  row.names(swappedColRow) = c(1,2)
  removedColumn = swappedColRow[-c(1),]
  matrixRemovedCol = as.matrix(removedColumn)
  swappedMatrix = t(matrixRemovedCol)
  bc.irls = baseline(swappedMatrix, lambda=2, hwi=100, it=10, int=2000, method = 'fillPeaks')
  # plot(bc.irls)
  mf = getCorrected(bc.irls)
  mf2d = data.frame(xs=myfiles[[i]]$V1, ys=mf[1,])
  par(mfrow=c(1,1))
  plot(x=mf2d$xs, y=smooth(mf2d$ys), col=2, type="l")
  # strip the leading "./" and the trailing ".txt" from the file path
  teststr <- temp[i]
  str_sub(teststr, 1, 2) <- ""
  str_sub(teststr, -4, str_length(teststr)) <- ""
  write.csv(mf2d, paste0(teststr, " BLC.csv"), row.names = FALSE)
}
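If you'd rather not hard-code the substring positions, the filename lines inside the loop could instead derive the output name from the path with base R helpers; a sketch that does the same as the two str_sub() calls for paths of the form ./name.txt:
out_name <- paste0(tools::file_path_sans_ext(basename(temp[i])), " BLC.csv")
write.csv(mf2d, out_name, row.names = FALSE)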
I need to use the functions detrend() and chron() from the dplR package on >300 tree-ring width datasets (.rwl files) of differing lengths. Rather than copying and pasting the code for each object, I would like to do this simultaneously. After some googling, it looks like I need to develop a for loop, but I have not had much luck after some troubleshooting. Could someone point me in the right direction? Below is my current code.
library(dplR)  # read.rwl(), detrend(), chron()
library(readr) # write_excel_csv()

## read data files in
or001 <- read.rwl("or001.rwl", format = "tucson")
or002 <- read.rwl("or002.rwl", format = "tucson")
or004 <- read.rwl("or004.rwl", format = "tucson")
#detrend - negex method
or001.negex <- detrend(or001, nyrs = NULL, method = "ModNegExp", f = 0.5,
                       pos.slope = FALSE)
or002.negex <- detrend(or002, nyrs = NULL, method = "ModNegExp", f = 0.5,
                       pos.slope = FALSE)
or004.negex <- detrend(or004, nyrs = NULL, method = "ModNegExp", f = 0.5,
                       pos.slope = FALSE)
#build final chronology
or001.negex.crn <- chron(or001.negex, prefix = 'OR')
or002.negex.crn <- chron(or002.negex, prefix = 'OR')
or004.negex.crn <- chron(or004.negex, prefix = 'OR')
#export final chronologies
write_excel_csv(or001.negex.crn, path = "or001.negex.crn.csv")
write_excel_csv(or002.negex.crn, path = "or002.negex.crn.csv")
write_excel_csv(or004.negex.crn, path = "or004.negex.crn.csv")
Consider reading the datasets into a list and applying the same steps to each by creating a function ('f1'):
f1 <- function(file, filenm) {
  dat <- read.rwl(file, format = "tucson")
  negex <- detrend(dat, nyrs = NULL, method = "ModNegExp", f = 0.5,
                   pos.slope = FALSE)
  negex.crn <- chron(negex, prefix = 'OR')
  write_excel_csv(negex.crn, path = filenm)
  return(negex.crn)
}
# // get all the files with the `.rwl` pattern
# // from the current working directory
files <- list.files(pattern = "\\.rwl$", full.names = TRUE)
# // change the file names by replacing the suffix with .negex.crn.csv
nm1 <- sub("\\.rwl$", ".negex.crn.csv", basename(files))
# // loop over the files, and apply the function
Map(f1, file = files, filenm = nm1)
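Map() also returns the chronologies as a list (named by the file paths, since files is an unnamed character vector), so you can keep them in memory for further analysis as well as writing the CSVs; a small usage sketch:
crns <- Map(f1, file = files, filenm = nm1)
str(crns[[1]])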
Working in R, I am having difficulty building a JSON file that I would use in an API call.
The required format for the JSON file can be seen here:
https://developer.trimblemaps.com/restful-apis/routing/route-reports/post-route-reports/
The input to the exercise is a dataframe like so:
Shipper_Latitude <- c(1,2,3,4)
Shipper_Longitude <- c(1,2,3,4)
r_combine.NewShipperLat <- c(1,2,3,4)
r_combine.NewShipperLon <- c(1,2,3,4)
r4 <- data.frame(Shipper_Latitude,Shipper_Longitude,r_combine.NewShipperLat,r_combine.NewShipperLon)
My attempt at building the required JSON file is as follows:
# assemble lat and long for starting location:
tempfuna <- function(Lat,Lon) {list(Coords = list(Lat = Lat,Lon = Lon))}
df_jsona <- mapply(FUN = tempfuna,Lat = r4$Shipper_Latitude, Lon = r4$Shipper_Longitude)
df_jsona <- lapply(df_jsona, function(x) {list(Coords = x)})
# assemble lat and long for ending location:
tempfunb <- function(Lat,Lon) {list(Coords = list(Lat = Lat,Lon = Lon))}
df_jsonb <- mapply(FUN = tempfunb,Lat = r4$r_combine.NewShipperLat, Lon = r4$r_combine.NewShipperLon)
df_jsonb <- lapply(df_jsonb, function(x) {list(Coords = x)})
# assemble list of ReportRoutes:
tempfunc <- function(A,B) {list(ReportRoutes = list(Stops = list(A,B)))}
df_jsonc <- mapply(FUN = tempfunc,A = df_jsona, B = df_jsonb)
# create final list:
post_body <- list(ReportRoutes = df_jsonc)
I get an error when I use the resulting file in an API call.
I think the problem is that the list items in the ReportRoutes list are incorrectly named. For example, the first item is named "Coords.ReportRoutes" instead of [[1]].
How can I rework the above to produce the required JSON file?
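For reference, here is a minimal sketch of one way around the naming problem, assuming (based on the code above) that the API expects ReportRoutes as a JSON array of route objects, each holding a Stops array of Coords objects. mapply() is told not to simplify, and unname() strips the names that would otherwise serialize as object keys:
library(jsonlite)
make_stop <- function(Lat, Lon) list(Coords = list(Lat = Lat, Lon = Lon))
# build start and end stops as unnamed lists so they serialize as arrays
starts <- unname(mapply(make_stop, r4$Shipper_Latitude, r4$Shipper_Longitude, SIMPLIFY = FALSE))
ends <- unname(mapply(make_stop, r4$r_combine.NewShipperLat, r4$r_combine.NewShipperLon, SIMPLIFY = FALSE))
# pair each start with its end inside a Stops array, one route per row of r4
routes <- unname(Map(function(a, b) list(Stops = list(a, b)), starts, ends))
post_body <- list(ReportRoutes = routes)
toJSON(post_body, auto_unbox = TRUE, pretty = TRUE)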
I am trying to use the specan function from the warbleR package. I want to pass my own wav file as an argument to the function. I have seen only one example in the docs, and it is not very self-explanatory.
library(tuneR)   # readWave(), mono()
library(warbleR) # autodetec(), specan()

wave_file <- readWave("C:/Users/ABC/Downloads/file_example_WAV_1MG.wav", from = 1, to = Inf, units = c("seconds"), header = FALSE, toWaveMC = NULL)
head(wave_file)
mono_file <- mono(wave_file, which = c("both"))
head(mono_file)
auto_file <- autodetec(X = "C:/Users/ABC/Downloads/file_example_WAV_1MG.wav")
head(auto_file)
dataframe <- data.frame(list = c("sound.files", "selec", "start", "end"))
dataframe <- data.frame(wave_file, "abc", 1, Inf)
dataframe
# Existing Example found in R docs
#setwd('C:/Users/ABC/Downloads')
#data1 <- data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "selec.table"))
#writeWave(Phae.long1,"Phae.long1.wav")
#writeWave(Phae.long2,"Phae.long2.wav")
#writeWave(Phae.long3,"Phae.long3.wav")
#writeWave(Phae.long4,"Phae.long4.wav")
#writeWave(Phae.long1,"file_example_WAV_1MG.wav")
#writeWave(Phae.long2," ")
#writeWave(Phae.long3,"1")
#writeWave(Phae.long4,"Inf")
getwd()
#file <- specan(X = selec.table, bp = c(0, 22))
#head(file)
file <- specan(X = dataframe, bp = c(0,22))
How to give my own .wav file as argument to the specan function?
Instead of passing the actual wav file to the dataframe, pass the name of that file. So your code should look like this:
dataframe <- data.frame(sound.files = "file_example_WAV_1MG.wav", selec = 2,
                        start = 1, end = 20)
a <- specan(X = dataframe, bp = c(0, 22))
You can then view a; the extracted features are stored in that dataframe. Make sure your file is stored in the working directory.
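A quick way to confirm the file is actually visible from the working directory before calling specan, mirroring the setwd() line commented out in the question:
setwd("C:/Users/ABC/Downloads") # the folder that holds the .wav file
list.files(pattern = "\\.wav$") # should list file_example_WAV_1MG.wav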
I am facing difficulties after running the code when trying to export the dataset to a spreadsheet or a .txt file.
I am a newbie to R, so maybe this question is trivial.
After running the following code:
eia_series <- function(api_key, series_id, start = NULL, end = NULL, num = NULL, tidy_data = "no", only_data = FALSE){
  # max 100 series
  # test if num is not null and either start or end is not null. Not allowed
  # api_key test for character.
  # series_id test for character.
  # if start/end not null, then check if format matches series id date format
  # parse date and numerical data
  # parse url
  series_url <- httr::parse_url("http://api.eia.gov/series/")
  series_url$query$series_id <- paste(series_id, collapse = ";")
  series_url$query$api_key <- api_key
  series_url$query$start <- start
  series_url$query$end <- end
  series_url$query$num <- num
  # get data
  series_data <- httr::GET(url = series_url)
  series_data <- httr::content(series_data, as = "text")
  series_data <- jsonlite::fromJSON(series_data)
  # move data out of the data.frame with nested lists and NULL the existing column
  series_data$data <- series_data$series$data
  series_data$series$data <- NULL
  # parse data
  series_data$data <- lapply(X = series_data$data,
                             FUN = function(x) data.frame(date = x[, 1],
                                                          value = as.numeric(x[, 2]),
                                                          stringsAsFactors = FALSE))
  # add names to the list with data (use the series ids, not the data itself)
  names(series_data$data) <- series_data$series$series_id
  # parse dates (eia_date_parse is a separate helper, not shown here)
  series_data$data <- eia_date_parse(series_list = series_data$data, format_character = series_data$series$f)
  # tidy up data
  if(tidy_data == "tidy_long"){
    series_data$data <- lapply(seq_along(series_data$data),
                               function(x) {cbind(series_data$data[[x]],
                                                  series_time_frame = series_data$series$f[x],
                                                  series_name = series_data$series$series_id[x],
                                                  stringsAsFactors = FALSE)})
    series_data$data <- do.call(rbind, series_data$data)
  }
  # only data
  if(only_data){
    series_data <- series_data$data
  }
  return(series_data)
}
After running the function
eia_series(api_key = "XXX",series_id = c("PET.MCRFPOK1.M", "PET.MCRFPOK2.M"))
I tried to "transfer" the data in order to export it but got the following error:
No encoding supplied: defaulting to UTF-8.
I don't understand why. Could you help me out?
That doesn't look like an error, rather an informational message, probably coming from httr::content(series_data, as = "text"). Look at "The body" section of https://cran.r-project.org/web/packages/httr/vignettes/quickstart.html. It shouldn't be a problem as long as your data returns what you expect; otherwise you can try a different encoding, or there is a bug elsewhere.
Try:
series_data <- httr::content(series_data, as = "text", encoding = "UTF-8")
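With the encoding set explicitly, the export itself can be a plain write.csv() on the returned data. A minimal sketch, assuming a valid api_key and that the eia_date_parse() helper is defined:
res <- eia_series(api_key = "XXX",
                  series_id = c("PET.MCRFPOK1.M", "PET.MCRFPOK2.M"),
                  tidy_data = "tidy_long", only_data = TRUE)
write.csv(res, "eia_series.csv", row.names = FALSE)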