LIST to data.frame from an XML file in R

I am working with XML files and trying to transform them into a data.frame. However, partway through the transformation the object is a LIST, as seen below:
My Code:
require(tidyverse)
require(xml2)
page<-read_xml('<?xml version="1.0" encoding="ISO-8859-1" ?>
<test2:TASS xmlns="http://www.vvv.com/schemas"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.vvv.com/schemas http://www.vvv.com/schemas/testV2_02_03.xsd"
xmlns:test2="http://www.vvv.com/schemas" >
<test2:billing>
<test2:proceduresummary>
<test2:guidenumber>Z4088</test2:guidenumber>
<test2:diagnosis>
<test2:table>ICD-10</test2:table>
<test2:diagnosiscod>G93</test2:diagnosiscod>
<test2:description>DISORDER OF BRAIN, UNSPECIFIED</test2:description>
</test2:diagnosis>
<test2:procedure>
<test2:procedure>
<test2:description>HOSPITAL</test2:description>
</test2:procedure>
<test2:amount>15</test2:amount>
</test2:procedure>
</test2:proceduresummary>
</test2:billing>
</test2:TASS>')
t1<-if ("test2" %in% names(xml_ns(page))) {
ns<-xml_ns_rename(xml_ns(page), test2 = "test")
} else {
ns<- xml_ns(page)
}
MYFILE<- ifelse(names(xml_ns(page)) %in% "d1",
page %>% xml_find_all(".//d1:billing"),
page %>% xml_find_all(".//test:billing", ns))
My "LIST":
List of 1
 $ :List of 2
  ..$ node:<externalptr>
  ..$ doc :<externalptr>
  ..- attr(*, "class")= chr "xml_node"
I'm using the code below to transform it, but it's giving an error:
MYFILE <- xml2::as_list(MYFILE) %>% jsonlite::toJSON() %>% jsonlite::fromJSON()
This is the error:
Error in UseMethod("as_list") :
no applicable method for 'as_list' applied to an object of class "list"
How do I turn it into data.frame/tibble?

It looks like the ifelse statement is causing the document to be searched three times: ifelse is vectorised over its condition, and names(xml_ns(page)) has three elements, which is the problem. If you need this line, try ifelse("d1" %in% names(xml_ns(page)), . . . instead.
This script works on the sample above. If there is more than one billing node, part of the script below will need modification; I highlighted that in the comments.
t1<-if ("test2" %in% names(xml_ns(page))) {
ns<-xml_ns_rename(xml_ns(page), test2 = "test")
} else {
ns<- xml_ns(page)
}
MYFILE<- ifelse(names(xml_ns(page)) %in% "d1",
page %>% xml_find_all(".//d1:billing"),
page %>% xml_find_all(".//test:billing", ns))
# To avoid searching the document multiple times:
# MYFILE<- if ("d1" %in% names(xml_ns(page))) {
# page %>% xml_find_all(".//d1:billing")
# } else {
# page %>% xml_find_all(".//test:billing", ns)
# }
OUTPUT <- lapply(MYFILE, function(MYFILE){
  # convert all of the nodes to a named vector
  output <- as_list(MYFILE) %>% unlist()
  # shorten the names
  names(output) <- gsub("^(.+?\\.)", "", names(output))
  # your next steps will determine the desired output
  # create a long-format data frame:
  # long_answer <- data.frame(Name = names(output), output, row.names = NULL)
  # create a wide-format data frame:
  wide_answer <- data.frame(t(output))
})
bind_rows(OUTPUT)
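For reference, a sketch of what this yields on the sample above (one row per billing node; column names come from the unlisted node paths, and the exact order may differ):
# approximate preview:
#   guidenumber diagnosis.table diagnosis.diagnosiscod ... procedure.amount
# 1       Z4088          ICD-10                    G93 ...               15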

Related

Choose command order in a function based on an error [R]

I have three files in a folder with the following names:
./multiqc_data$ ls
file1.json
file2.json
file3.json
When I open the files with the TidyMultiqc package, NaN values present in the files can lead to the following error:
files <- dir(path,pattern = "*.json") #locate files
files %>%
map(~ load_multiqc(file.path(path, .))) #parse them
## the error
Error in parse_con(txt, bigint_as_char) :
lexical error: invalid char in json text.
"mapped_failed_pct": NaN, "paired in
(right here) ------^
I want to create a function to handle this error: every time it pops up, I want to apply this sed command to all files in the folder.
system(paste("gsed -i 's/NaN/null/g'",paste0(path,"*.json")))
Any ideas how I can achieve this?
You could use this wrapper:
safe_load_multiqc <- function(path, file) {
  tryCatch(load_multiqc(file.path(path, file)), error = function(e) {
    system(paste("gsed -i 's/NaN/null/g'", paste0(path, "*.json")))
    # retry after cleaning the files
    load_multiqc(file.path(path, file))
  })
}
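A hypothetical usage over the whole folder, mirroring the map() call from your question (purrr assumed loaded):
files <- dir(path, pattern = "\\.json$") # locate files
results <- map(files, ~ safe_load_multiqc(path, .)) # parse, cleaning and retrying on error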
A good way to handle errors in work pipelines like this is to use restarts, via withCallingHandlers and withRestarts.
You establish the condition handlers and the recovery protocols (restarts); then you can choose which protocols to use and in which order. Calling handlers allow much finer control over error conditions than a common try-catch.
In the example I wrote two handlers: removeNaN (works at folder level) and skipFile (works at file level); if the first fails, the second is executed (simply skipping the file). Of course, this is just an example.
I think in your case you could simply run sed every time; nevertheless, I hope this answer meets your request for a canonical approach.
Inspiration and further reading: Beyond Exception Handling: Conditions and Restarts
path <- "../your_path"
# function that does the error_prone task
do_task <- function(path){
files <- dir(path,pattern = "*.json") #locate files
files %>%
map(~ withRestart( # set an alternative restart
load_multiqc(file.path(path, .)), # parsing
skipFile = function() { # if fails, skip only this file
message(paste("skipping ", file.path(path, .)))
return(NULL)
}))
}
# error handler that invokes "removeNaN"
removeNaNHandler <- function(e) tryInvokeRestart("removeNaN")
# error handler that invokes "skipFile"
skipFileHandler <- function(e) tryInvokeRestart("skipFile")
# run the task with handlers in case of error
withCallingHandlers(
condition = removeNaNHandler, # call handler (on generic error)
# condition = skipFileHandler, # if previous fails skips file
{
# run with recovery protocols (can define more than one)
withRestarts({
do_task(path)},
removeNaN = function() # protocol "removeNaN"
{
system(paste("gsed -i 's/NaN/null/g'",paste0(path,"*.json")))
do_task(path) # try again
}
)
}
)
Based on this open github issue, a potential solution provided by Peter Diakumis is to use RJSONIO::fromJSON() in place of jsonlite::read_json(). You could adapt this solution to your use-case by e.g. creating your own load_multiqc() function:
library(RJSONIO)
load_multiqc_bugfix <- function(paths,
plots = NULL,
find_metadata = function(...) {
list()
},
plot_parsers = list(),
sections = "general") {
assertthat::assert_that(all(sections %in% c(
"general", "plot", "raw"
)), msg = "Only 'general', 'plot' and 'raw' (and combinations of those) are valid items for the sections parameter")
# Vectorised over paths
paths %>%
purrr::map_dfr(function(path) {
parsed <- RJSONIO::fromJSON(path)
# The main data is plots/general/raw
main_data <- sections %>%
purrr::map(~ switch(.,
general = parse_general(parsed),
raw = parse_raw(parsed),
plot = parse_plots(parsed, plots = plots, plot_parsers = plot_parsers)
)) %>%
purrr::reduce(~ purrr::list_merge(.x, !!!.y), .init = list()) %>%
purrr::imap(~ purrr::list_merge(.x, metadata.sample_id = .y))
# Metadata is defined by a user function
metadata <- parse_metadata(parsed = parsed, samples = names(main_data), find_metadata = find_metadata)
purrr::list_merge(metadata, !!!main_data) %>%
dplyr::bind_rows()
}) %>%
# Only arrange the columns if we have at least 1 column
`if`(
# Move the columns into the order: metadata, general, plot, raw
ncol(.) > 0,
(.) %>%
dplyr::relocate(dplyr::starts_with("raw")) %>%
dplyr::relocate(dplyr::starts_with("plot")) %>%
dplyr::relocate(dplyr::starts_with("general")) %>%
dplyr::relocate(dplyr::starts_with("metadata")) %>%
# Always put the sample ID at the start
dplyr::relocate(metadata.sample_id),
.
)
}
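A hypothetical call, assuming the internal helpers this function references (parse_general, parse_raw, parse_plots, parse_metadata) are also available in your session, e.g. copied from the TidyMultiqc sources alongside it:
files <- dir(path, pattern = "\\.json$")
df <- load_multiqc_bugfix(file.path(path, files))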

Parallel package for Windows 10 in R

I have a dataset that I'm trying to parse in R. The data comes from HMDB and the dataset name is Serum Metabolites (an XML file). The XML file contains about 25K metabolite nodes, each of which I want to parse into sub-nodes.
I have code that parses the XML file into a list object in R.
Since the XML file is quite big, and since there are about 12 sub-nodes I want for each metabolite, parsing takes a long time: about 3 hours per 1,000 metabolites.
I'm trying to use the package parallel but receive an error.
The packages:
library("XML")
library("xml2")
library( "magrittr" ) #for pipe operator %>%
library("pbapply") # to track on progress
library("parallel")
The function:
# The function receives an XML file (its location) and returns a list of nodes
Short_Parser_HMDB <- function(xml.file_location){
start.time<- Sys.time()
# Read as xml file
doc <- read_xml( xml.file_location )
#get metabolite nodes (only first three used in this sample)
met.nodes <- xml_find_all( doc, ".//d1:metabolite" ) [1:1000] # [(i*1000+1):(1000*i+1000)] # [1:3]
#list of data.frame
xpath_child.v <- c( "./d1:accession",
"./d1:name" ,
"./d1:description",
"./d1:synonyms/d1:synonym" ,
"./d1:chemical_formula" ,
"./d1:smiles" ,
"./d1:inchikey" ,
"./d1:biological_properties/d1:pathways/d1:pathway/d1:name" ,
"./d1:diseases/d1:disease/d1:name" ,
"./d1:diseases/d1:disease/d1:references",
"./d1:kegg_id" ,
"./d1:meta_cyc_id"
)
child.names.v <- c( "accession",
"name" ,
"description" ,
"synonyms" ,
"chemical_formula" ,
"smiles" ,
"inchikey" ,
"pathways_names" ,
"diseases_name",
"references",
"kegg_id" ,
"meta_cyc_id"
)
#first, loop over the met.nodes
L.sec_acc <- parLapply(cl, met.nodes, function(x) { # pblapply to track progress, lapply (slows the function down dramatically), or parLapply for parallel execution
#second, loop over the xpath desired child-nodes
temp <- parLapply(cl, xpath_child.v, function(y) {
xml_find_all(x, y ) %>% xml_text(trim = T) %>% data.frame( value = .)
})
#set their names
names(temp) = child.names.v
return(temp)
})
end.time<- Sys.time()
total.time<- end.time-start.time
print(total.time)
return(L.sec_acc )
}
Now create the environment:
# select the location where the XML file is
location= "D:/path/to/file//HMDB/DataSets/serum_metabolites/serum_metabolites.xml"
cl <-makeCluster(detectCores(), type="PSOCK")
clusterExport(cl, c("Short_Parser_HMDB", "cl"))
clusterEvalQ(cl,{library("parallel")
library("magrittr")
library("XML")
library("xml2")
})
And execute :
Short_outp<-Short_Parser_HMDB(location)
stopCluster(cl)
The error received:
> Short_outp<-Short_Parser_HMDB(location)
Error in checkForRemoteErrors(val) :
one node produced an error: invalid connection
Based on these links, I tried to implement the parallel setup:
Parallel Processing in R
How to call global function from the parLapply function?
Error in R parallel:Error in checkForRemoteErrors(val) : 2 nodes produced errors; first error: cannot open the connection
but couldn't find the "invalid connection" error discussed in any of them.
I'm using Windows 10 and the latest R version, 4.0.2 (not sure if that's enough information).
Any hint or idea will be appreciated.
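One observation, hedged since I can't test your setup: xml2 documents and nodes are external pointers, and external pointers cannot be serialized to PSOCK workers, so passing met.nodes (or exporting cl itself) to parLapply can produce errors like this. A minimal sketch of a workaround, serializing each node to text on the master and re-parsing it inside the worker (the file path and XPaths are illustrative):
library(parallel)
library(xml2)

cl <- makeCluster(detectCores(), type = "PSOCK")
clusterEvalQ(cl, library(xml2))

doc <- read_xml("serum_metabolites.xml") # illustrative path
# serialize each metabolite node to plain text so it can cross process boundaries
node_strings <- vapply(xml_find_all(doc, ".//d1:metabolite"), as.character, character(1))

res <- parLapply(cl, node_strings, function(s) {
  node <- read_xml(s) # re-parse inside the worker
  # the d1 prefix assumes the serialized fragment keeps its default namespace
  xml_text(xml_find_first(node, ".//d1:accession"))
})
stopCluster(cl)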

Why do I get this error using biomod2::response.plot2, and is it important? Error in ncol(dat_) : could not find function "ncol"

When I run the example for the response.plot2 function (biomod2 package) I get the above error. The code produces some plots but does not save an object.
Here's the example (including the code that I ran): https://www.rdocumentation.org/packages/biomod2/versions/3.3-7.1/topics/response.plot2
[edit:]
The source code for the function response.plot2 is here:
https://r-forge.r-project.org/scm/viewvc.php/checkout/pkg/biomod2/R/response.plot.R?revision=728&root=biomod
It includes these lines:
.as.ggdat.1D <-
function (rp.dat)
{
# requireNamespace('dplyr')
out_ <- bind_rows(lapply(rp.dat, function(dat_) {
dat_$id <- rownames(dat_)
id.col.id <- which(colnames(dat_) == "id")
expl.dat_ <- dat_ %>% dplyr::select(1, id.col.id) %>%
tidyr::gather("expl.name", "expl.val", 1)
pred.dat_ <- dat_ %>% dplyr::select(-1, id.col.id) %>%
tidyr::gather("pred.name", "pred.val", (1:(ncol(dat_)-2)))
out.dat_ <- dplyr::full_join(expl.dat_, pred.dat_)
out.dat_$expl.name <- as.character(out.dat_$expl.name)
out.dat_$pred.name <- as.character(out.dat_$pred.name)
return(out.dat_)
}))
out_$expl.name <- factor(out_$expl.name, levels = unique(out_$expl.name))
return(out_)
}
I tried changing ncol(dat_) to base::ncol(dat_) and then running the whole lot to redefine the function response.plot2 for my R session, but I got a different error message:
Error in base::ncol : could not find function "::"
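Not a confirmed fix, but the second error is consistent with the redefined function living in .GlobalEnv rather than the package namespace, where it can lose access to the package's imports. A hedged sketch of the usual workaround for patching an internal function, re-attaching the namespace to the patched copy before installing it back:
# after redefining .as.ggdat.1D in your session:
environment(.as.ggdat.1D) <- asNamespace("biomod2")
assignInNamespace(".as.ggdat.1D", .as.ggdat.1D, ns = "biomod2")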

How to change value if error occurs in for loop?

I have a loop that reads HTML table data from ~440 web pages. The code on each page is not exactly the same, so sometimes I need table node 1 and sometimes I need node 2. Right now I've just been setting the node numbers manually in a list and feeding it into the loop. My problem is that the page nodes have started changing, and updating the node # list is getting to be a hassle.
If the loop encounters the wrong node # (i.e., 1 instead of 2, or the reverse) it gives an error and shuts down. Is there a way to have the loop replace the erroneous node number with the correct one when it encounters an error, and then keep running as if nothing happened?
Here's the readHTML portion of the code in my loop with an example url:
url <- "http://espn.go.com/nba/player/gamelog/_/id/2991280/year/2013/"
html.page <- htmlParse(url)
tableNodes <- getNodeSet(html.page, "//table")
x <- as.numeric(Players$Nodes[s])
tbl = readHTMLTable(tableNodes[[x]], colClasses = c("character"),stringsAsFactors = FALSE)
Here's the error I get when the node # is wrong:
"Error in readHTMLTable(tableNodes[[x]], colClasses = c("character"), stringsAsFactors = FALSE) : error in evaluating the argument 'doc' in selecting a method for function 'readHTMLTable': Error in tableNodes[[x]] : subscript out of bounds"
Example code:
A <- c("dog", "cat")
Nodes <- as.data.frame(1:1)
# Nodes <- as.data.frame(1:2) <-- this works without errors
colnames(Nodes)[1] <- "Col1"
Nodes2 <- 2
url <-c("http://espn.go.com/nba/player/gamelog/_/id/6639/year/2013/","http://espn.go.com/nba/player/gamelog/_/id/6630/year/2013/")
for (i in 1:length(A))
{
html.page <- htmlParse(url[i])
tableNodes <- getNodeSet(html.page, "//table")
x <- as.numeric(Nodes$Col1[i])
df = readHTMLTable(tableNodes[[x]], colClasses = c("character"),stringsAsFactors = FALSE)
#tryCatch(df) here.....no clue
assign(paste0("", A[i]), df)
}
If you get a "subscript out of bounds" error message, then you should retry with a lower x. Here's a general demo with tryCatch, based on the code you posted in the original question (although I have replaced x with 2, as I have no idea what Players and s are):
> msg <- tryCatch(readHTMLTable(tableNodes[[2]], colClasses = c("character"),stringsAsFactors = FALSE), error = function(e)e)
> str(msg)
List of 2
$ message: chr "error in evaluating the argument 'doc' in selecting a method for function 'readHTMLTable': Error in tableNodes[[2]] : subscript"| __truncated__
$ call : language readHTMLTable(tableNodes[[2]], colClasses = c("character"), stringsAsFactors = FALSE)
- attr(*, "class")= chr [1:3] "simpleError" "error" "condition"
> msg$message
[1] "error in evaluating the argument 'doc' in selecting a method for function 'readHTMLTable': Error in tableNodes[[2]] : subscript out of bounds\n"
> grepl('subscript out of bounds', msg$message)
[1] TRUE
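To fold that check back into the loop, here is a minimal sketch (the fallback index and the error-message test are assumptions; adjust them to your pages):
read_node_table <- function(tableNodes, x, fallback) {
  tryCatch(
    readHTMLTable(tableNodes[[x]], colClasses = c("character"), stringsAsFactors = FALSE),
    error = function(e) {
      if (grepl("subscript out of bounds", conditionMessage(e))) {
        # wrong node number: retry once with the alternative index
        readHTMLTable(tableNodes[[fallback]], colClasses = c("character"), stringsAsFactors = FALSE)
      } else {
        stop(e)
      }
    }
  )
}
# inside the loop:
# df <- read_node_table(tableNodes, x, fallback = if (x == 1) 2 else 1)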

How to parse xml/sbml with R package xml?

I'm trying to parse information from the sbml/xml file below
https://dl.dropboxusercontent.com/u/10712588/file.xml
from this code
http://search.bioconductor.jp/codes/11172
It seems that I can import the file normally by
doc <- xmlTreeParse(filename,ignoreBlanks = TRUE)
but I can't recover node attributes by
atrr <- xpathApply(doc, "//species[@id]", xmlGetAttr, "id")
or
xpathApply(doc, "//species", function(n) xmlValue(n[[2]]))
A node of the file follows...
<species id="M_10fthf_m" initialConcentration="1" constant="false" hasOnlySubstanceUnits="false" name="10-formyltetrahydrofolate(2-)" metaid="_metaM_10fthf_m" boundaryCondition="false" sboTerm="SBO:0000247" compartment="m">
<notes>
<body xmlns="http://www.w3.org/1999/xhtml">
<p>FORMULA: C20H21N7O7</p>
<p>CHARGE: -2</p>
<p>INCHI: InChI=1S/C20H23N7O7/c21-20-25-16-15(18(32)26-20)23-11(7-22-16)8-27(9-28)12-3-1-10(2-4-12)17(31)24-13(19(33)34)5-6-14(29)30/h1-4,9,11,13,23H,5-8H2,(H,24,31)(H,29,30)(H,33,34)(H4,21,22,25,26,32)/p-2/t11-,13+/m1/s1</p>
<p>HEPATONET_1.0_ABBREVIATION: HC00212</p>
<p>EHMN_ABBREVIATION: C00234</p>
</body>
</notes>
<annotation>
...
I would like to retrieve all the information inside the species nodes; does anyone know how to do that?
There exists an SBML parsing library, libSBML (http://sbml.org/Software/libSBML).
This includes a binding to R that allows access to the SBML objects directly within R, using code similar to:
document = readSBML(filename);
errors = SBMLErrorLog_getNumFailsWithSeverity(
SBMLDocument_getErrorLog(document),
enumToInteger("LIBSBML_SEV_ERROR", "_XMLErrorSeverity_t")
);
if (errors > 0) {
cat("Encountered the following SBML errors:\n");
SBMLDocument_printErrors(document);
q(status=1);
}
model = SBMLDocument_getModel(document);
if (is.null(model)) {
cat("No model present.\n");
q(status=1);
}
species = Model_getSpecies(model, index_of_species);
id = Species_getId(species);
conc = Species_getInitialConcentration(species)
There is a Species_get(NameOfAttribute) function for each possible attribute; together with Species_isSet(NameOfAttribute); Species_set(NameOfAttribute) and Species_unset(NameOfAttribute).
The API is similar for interacting with any SBML element.
The libSBML releases include R installers, available from
http://sourceforge.net/projects/sbml/files/libsbml/5.8.0/stable
by navigating to the R_interface subdirectory for the OS and architecture of your choice.
The source code distribution of libSBML contains an examples/r directory with many examples of using libSBML to interact with SBML in the R environment.
I guess it depends on what you mean when you say you want to "retrieve" all the information in the species nodes, because that retrieved data could be coerced to any number of different formats. The following assumes you want it all in a data frame, where each row is a species node from your XML file and the columns represent different pieces of information.
When just trying to extract information, I generally find it easier to work with lists than with XML.
doc <- xmlTreeParse(xml_file, ignoreBlanks = TRUE)
doc_list <- xmlToList(doc)
Once it's in a list, you can figure out where the species data is stored:
sapply(doc_list, function(x) unique(names(x)))
[[1]]
NULL
[[2]]
NULL
[[3]]
NULL
[[4]]
[1] "species"
[[5]]
[1] "reaction"
[[6]]
[1] "metaid"
$.attrs
[1] "level" "version"
So you really only want the information in doc_list[[4]]. Take a look at just the first component of doc_list[[4]]:
str(doc_list[[4]][[1]])
List of 9
$ : chr "FORMULA: C20H21N7O7"
$ : chr "CHARGE: -2"
$ : chr "HEPATONET_1.0_ABBREVIATION: HC00212"
$ : chr "EHMN_ABBREVIATION: C00234"
$ : chr "http://identifiers.org/obo.chebi/CHEBI:57454"
$ : chr "http://identifiers.org/pubchem.compound/C00234"
$ : chr "http://identifiers.org/hmdb/HMDB00972"
$ : Named chr "#_metaM_10fthf_c"
..- attr(*, "names")= chr "about"
$ .attrs: Named chr [1:9] "M_10fthf_c" "1" "false" "false" ...
..- attr(*, "names")= chr [1:9] "id" "initialConcentration" "constant" "hasOnlySubstanceUnits" ...
So you have the information contained in the first eight lists, plus the information contained in the attributes.
Getting the attributes information is easy because it's already named. The following formats the attributes information into a data frame for each node:
doc_attrs <- lapply(doc_list[[4]], function(x) {
x <- unlist(x[names(x) == ".attrs"])
col_names <- gsub(".attrs.", "", names(x))
x <- data.frame(matrix(x, nrow = 1), stringsAsFactors = FALSE)
colnames(x) <- col_names
x
})
Some nodes didn't appear to have attributes information and so returned empty data frames. That caused problems later so I created data frames of NAs in their place:
doc_attrs_cols <- unique(unlist(sapply(doc_attrs, colnames)))
doc_attrs[sapply(doc_attrs, length) == 0] <-
lapply(doc_attrs[sapply(doc_attrs, length) == 0], function(x) {
df <- data.frame(matrix(rep(NA, length(doc_attrs_cols)), nrow = 1))
colnames(df) <- doc_attrs_cols
df
})
When it came to pulling non-attribute data, the names and values of the variables were generally contained within the same string. I originally tried to come up with a regular expression to extract the names, but they're all formatted so differently that I gave up and just identified all the possibilities in this particular data set:
flags <- c("FORMULA:", "CHARGE:", "HEPATONET_1.0_ABBREVIATION:",
"EHMN_ABBREVIATION:", "obo.chebi/CHEBI:", "pubchem.compound/", "hmdb/HMDB",
"INCHI: ", "kegg.compound/", "kegg.genes/", "uniprot/", "drugbank/")
Also, sometimes the non-attribute information was kept as just a list of values, as in the node shown above, while other times it was contained in "notes" and "annotation" sublists, so I had to include an if-else statement to make things more consistent.
doc_info <- lapply(doc_list[[4]], function(x) {
if(any(names(x) != ".attrs" & names(x) != "")) {
names(x)[names(x) != ".attrs"] <- ""
x <- unlist(do.call("c", as.list(x[names(x) != ".attrs"])))
} else {
x <- unlist(x[names(x) != ".attrs"])
}
x <- gsub("http://identifiers.org/", "", x)
need_names <- names(x) == ""
names(x)[need_names] <- gsub(paste0("(", paste0(flags, collapse = "|"), ").+"), "\\1", x[need_names], perl = TRUE)
#names(x) <- gsub("\\s+", "", names(x))
x[need_names] <- gsub(paste0("(", paste0(flags, collapse = "|"), ")(.+)"), "\\2", x[need_names], perl = TRUE)
col_names <- names(x)
x <- data.frame(matrix(x, nrow = 1), stringsAsFactors = FALSE)
colnames(x) <- col_names
x
})
To get everything together into a data frame, I suggest the plyr package's rbind.fill.
require(plyr)
doc_info <- do.call("rbind.fill", doc_info)
doc_attrs <- do.call("rbind.fill", doc_attrs)
doc_all <- cbind(doc_info, doc_attrs)
dim(doc_all)
[1] 3972 22
colnames(doc_all)
[1] "FORMULA:" "CHARGE:" "HEPATONET_1.0_ABBREVIATION:" "EHMN_ABBREVIATION:"
[5] "obo.chebi/CHEBI:" "pubchem.compound/" "hmdb/HMDB" "about"
[9] "INCHI: " "kegg.compound/" "kegg.genes/" "uniprot/"
[13] "drugbank/" "id" "initialConcentration" "constant"
[17] "hasOnlySubstanceUnits" "name" "metaid" "boundaryCondition"
[21] "sboTerm" "compartment"
As a partial answer: the document uses namespaces, and 'species' is part of the 'id' namespace. So
> xpathSApply(doc, "//id:species", xmlGetAttr, "id", namespaces="id")
[1] "M_10fthf_c" "M_10fthf_m" "M_13dampp_c" "M_h2o_c" "M_o2_c"
[6] "M_bamppald_c" "M_h2o2_c" "M_nh4_c" "M_h_m" "M_nadph_m"
...
with id:species and namespaces="id" being different from what you illustrate above.
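Building on the same idea, a hedged one-liner to retrieve every attribute of each species node at once (xmlAttrs returns a named character vector per node; this assumes doc holds internal nodes, e.g. from xmlParse):
species_attrs <- xpathApply(doc, "//id:species", xmlAttrs, namespaces = "id")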
