I am trying to scrape a website using the following:
industryurl <- "https://finance.yahoo.com/industries"

library(rvest)

# read every table on the industries page
read <- read_html(industryurl) %>%
  html_table()

library(plyr)
industries <- ldply(read, data.frame)
industries <- industries[-1, ]

# collect every link on the page, then keep only the industry pages
read <- read_html(industryurl)
industryurls <- html_attr(html_nodes(read, "a"), "href")
links <- industryurls[grep("/industry/", industryurls)]

# the hrefs are relative, so prepend the domain
industryurl <- "https://finance.yahoo.com"
links <- paste0(industryurl, links)
links
##############################################################################################
store <- NULL
tbl <- NULL

for (i in links) {
  store[[i]] <- read_html(i)
  tbl[[i]] <- html_table(store[[i]])
}
#################################################################################################
I am mostly interested in the code between the ########## markers. I want to apply a function instead of a for loop, since I am running into timeout issues with Yahoo, and I want to make the extraction more human-like (the data is not too much).
My question is: how can I take links, apply a function to it, and set a sort of delay timer to read in the contents of the for loop?
I can paste my own version of the for loop, which does not work.
This is the function I came up with:
##First argument is the link you need
##The second argument is the total time for Sys.sleep
extract_function <- function(define_link, define_time){
  print(paste0("The system will stop for: ", define_time, " seconds"))
  Sys.sleep(define_time)
  first <- read_html(define_link)
  print(paste0("It will now return the table for link: ", define_link))
  return(html_table(first))
}
##I added the following tryCatch function
link_try_catch <- function(define_link, define_time){
  out <- tryCatch(extract_function(define_link, define_time),
                  error = function(e) NA)
  return(out)
}
##You can now retrieve the data using the links vector in two ways
##Picking the first ten, so it should not crash on link 5
p <- lapply(1:10, function(i) link_try_catch(links[i], 1))
##OR (I subset the vector just for demo purposes)
p2 <- lapply(links[1:10], function(i) extract_function(i, 1))
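If you want the pauses to look less mechanical, one small variation (my own suggestion, not part of the original question) is to draw a random delay for each request:
##Optional tweak: a random delay between 1 and 3 seconds per link,
##so the requests look less like a fixed-interval bot
p3 <- lapply(links[1:10], function(i) link_try_catch(i, runif(1, min = 1, max = 3)))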
Hope it helps
Hi dear community members,
I'm trying to get the article titles from this website (https://yomou.syosetu.com/search.php?&type=er&order_former=search&order=new&notnizi=1&p=1) using R. I executed the following code.
### read HTML ###
html_narou <- rvest::read_html("https://yomou.syosetu.com/search.php?&type=er&order_former=search&order=new&notnizi=1&p=1",
                               encoding = "UTF-8")

### create the common part object of CSS ###
base_css_former <- "#main_search > div:nth-child("
base_css_latter <- ") > div > a"

### create NULL objects ###
art_css <- NULL
narou_titles <- NULL

### extract the title data and store it in the NULL object ###
#### The article titles don't exist in "#main_search > div:nth-child(1~4) > div > a", so i in the loop starts from five ####
for (i in 5:24) {
  art_css <- paste0(base_css_former, as.character(i), base_css_latter)
  narou_title <- rvest::html_element(x = html_narou,
                                     css = art_css) %>%
    rvest::html_text()
  narou_titles <- base::append(narou_titles, narou_title)
}
But this for loop takes a long time in R, and I want to use the "map" function from "purrr" instead. However, I'm not familiar with purrr::map and the process is complicated.
How can I substitute map for the for loop?
The real issue is that you’re increasing the size of your narou_titles vector on every iteration, which is notoriously slow in R. Instead, you should pre-allocate the vector to its final length, then assign elements by index. purrr does this behind the scenes, which can make it appear faster, but you can do the same thing without purrr.
With your for loop:
library(rvest)
narou_titles <- vector("character", 20)

for (i in 5:24) {
  art_css <- paste0(base_css_former, as.character(i), base_css_latter)
  # i runs from 5 to 24, so shift it down by 4 to fill positions 1 to 20
  narou_titles[[i - 4]] <- html_element(
    x = html_narou,
    css = art_css
  ) %>%
    html_text()
}
With purrr::map_chr():
library(rvest)
library(purrr)
get_title <- function(i) {
  art_css <- paste0(base_css_former, as.character(i), base_css_latter)
  html_element(
    x = html_narou,
    css = art_css
  ) %>%
    html_text()
}

narou_titles <- map_chr(5:24, get_title)
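As an aside, if every title on the page shares the same structure under #main_search (an assumption I have not verified against the live page), you may be able to skip the per-index selectors entirely and let rvest collect all matching nodes in one call:
# Hypothetical one-shot alternative, assuming all titles match one selector
narou_titles <- html_narou %>%
  rvest::html_elements(css = "#main_search > div > div > a") %>%
  rvest::html_text()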
I have an Excel file that contains certain keywords that need to be searched on Google through R.
The output to be created is a data frame containing the following variables:
Keyword; Position (position of the URL in the search results); Title (title of the i-th search result); Text (text in that search result); URL; Domain
The keywords and an example of the output are given in the link below:
https://drive.google.com/file/d/1AM3d5Hbf5nBpbRG1ydnZM7ZG2AdUyy-6/view?usp=sharing
(Sheet 1 has the keywords and sheet 2 has the sample output)
I tried to create a similar output but there seems to be an error.
Code:
# Web Scraping in R
library(XML)
library(RCurl)
library(dplyr)
library(rvest)
library(urltools)
library(htm2txt)
library(readxl)

data <- read_excel(file.choose()) # Importing the data

output <- data.frame(matrix(ncol = 6, nrow = 0))
colnames(output) <- c("Name", "Position", "Title", "Text", "URL", "Domain")

for (i in 1:nrow(data)) {
  search.term <- data[i, 1]
  getGoogleURL <- function(search.term, domain = '.com', quotes = TRUE) {
    search.term <- gsub(' ', '%20', search.term) # Cleaning the search term
    if (quotes) search.term <- paste('%22', search.term, '%22', sep = '')
    getGoogleURL <- paste('http://www.google', domain, '/search?q=',
                          search.term, sep = '')
  }
  quotes <- "False"
  search.url <- getGoogleURL(search.term = search.term, quotes = quotes)

  page <- read_html(search.url)
  links <- page %>% html_nodes("a") %>% html_attr("href")
  link <- links[startsWith(links, "/url?q=")]
  link <- sub("^/url\\?q\\=(.*?)\\&sa.*$", "\\1", link)

  for (j in 1:length(link)) {
    page1 <- read_html(link[j])
    name <- data[i, 1]
    position <- j
    title <- page1 %>% html_node("title") %>% html_text()
    text <- gettxt(link[j])
    url <- link[j]
    domain <- suffix_extract(domain(link[j]))$host
    vect <- c(name, position, title, text, url, domain)
    output <- rbind(output, vect)
  }
}
The error being shown is:
Error in match.names(clabs, nmi) : names do not match previous names
Please help, I'm new to R.
That error comes from rbind when the columns don't line up perfectly, for instance when a column is missing or extra, or when the same number of columns carry different names. In this case, it might be because one of your vect variables is empty/NULL or has length over 1.
rbind(data.frame(a = 1, b = 2), data.frame(b = 3, c = 4))
# Error in match.names(clabs, names(xi)) :
#   names do not match previous names
Since iteratively adding rows to a frame gets expensive (it makes a complete copy of the frame every time even one row is added, which is grossly inefficient), it's generally better to append to a list and convert it into a frame in one call.
out <- list()
for (i in seq_len(nrow(data))) {
  # ...
  for (j in seq_along(link)) {
    # ...
    vect <- c(name, position, title, text, url, domain)
    stopifnot(length(vect) == 6L)
    out <- c(out, list(vect))
  }
}
output <- do.call(rbind.data.frame, out)
colnames(output) <- c("Name", "Position", "Title", "Text", "URL", "Domain")
(In reality, instead of stopifnot, one might record the url and data retrieved into a different list for forensic purposes. Or find the missing element and NA it before adding to the list. Either way, stopifnot is intended here as a placeholder for something more contextually relevant to you and your process.)
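A minimal sketch of that NA idea (the six variables are the ones built inside the inner loop above):
# replace any field that is not exactly length 1 with NA, so vect
# always ends up with six elements before it is appended
fields <- list(name, position, title, text, url, domain)
fields <- lapply(fields, function(x) if (length(x) == 1) x else NA)
vect <- unlist(fields)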
I am trying to write a for loop that provides the author's short ID to a database called RePEc and gets the affiliation data. So far, this is what I have (NB: USERCODE only works on my IP):
url <- "https://api.repec.org/call.cgi?code=USERCODE&getauthorrecordraw="

for (i in 1:length(df_affiliations)) {
  Sys.sleep(1)
  affiliation_fun <- paste(url, df_affiliations$author_reg_1[i])
  affiliation_run <- fromJSON(txt = affiliation_fun) %>%
    select("affiliation") %>%
    unlist(use.names = FALSE)
  affiliation_1 <- paste(unlist(affiliation_run), collapse = " ")
  df_affiliations$vector <- rbind(affiliation_1)
}
Every time I try this, I either get only the last value or an HTTP Error 400. For loops are not my strong suit. Can anyone figure out where I've gone wrong? Thanks in advance for your help!
Your loop is overwriting your output variable on each iteration. A solution here is to initialize an empty vector outside of the loop and then assign each value to its index:
url <- "https://api.repec.org/call.cgi?code=USERCODE&getauthorrecordraw="

# define an empty vector, one slot per row
# (note: length() on a data frame counts columns, so use nrow() instead)
affiliation_1 <- vector(length = nrow(df_affiliations))

for (i in seq_len(nrow(df_affiliations))) {
  Sys.sleep(1)
  # paste0 avoids the space that paste() inserts, which breaks the URL
  affiliation_fun <- paste0(url, df_affiliations$author_reg_1[i])
  affiliation_run <- fromJSON(affiliation_fun) %>%
    select(affiliation) %>%
    unlist(use.names = FALSE)
  affiliation_1[i] <- paste(unlist(affiliation_run), collapse = " ")
}
This is untested since the question is not reproducible, but should provide a starting point for your final solution.
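Since you mentioned intermittent HTTP 400 errors, you could also wrap the request in tryCatch so one bad author ID doesn't stop the whole run. A minimal sketch, reusing the same pipeline as above (untested, and the NA fallback is just one possible choice):
# hypothetical guard: return NA for any request that errors out
safe_affiliation <- function(author_id, wait = 1) {
  Sys.sleep(wait)
  tryCatch({
    fromJSON(paste0(url, author_id)) %>%
      select(affiliation) %>%
      unlist(use.names = FALSE) %>%
      paste(collapse = " ")
  }, error = function(e) NA)
}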
So I am trying to make a basic sensitivity analysis script. The outputs come out as I want via the print I added at the end of the script. The issue is that I would like a tibble or other object with all the outputs appended together, which I can export as a csv or xlsx.
I created two functions: sens_analysis, which runs all the code, and multiply_across, which multiplies across each possible percentage for each possible column of your table. You need multiply_across to run sens_analysis.
I would normally like a title, but instead I just added an indicator column that I can sort by.
I made everything with mtcars, so it should be easy to replicate. The issue is that I just have a huge print at the end, not an object that I can manipulate or pull from for other analysis.
I have been trying rbind, bind_rows, and appending rows in a variety of ways, as well as building a new object. As you can see at the top of sens_analysis, I make something called output that I have tried to populate, which hasn't gone well.
rm(list = ls())

library(dplyr)
library(tidyr)
library(purrr)
library(tibble)
library(magrittr)
library(xtable)

data <- mtcars
percent <- c(.05, .1, .15)
goods <- c("hp", "gear", "wt")
weight <- c(6, 7, 8)
disagg <- "cyl"
func <- median
sens_analysis <- function(data = data, goods = goods, weight = weight,
                          disagg = disagg, percent = percent, func = func) {
  output <- NULL %>%
    as.tibble()
  basket <- rbind(goods, weight)
  percent <- c(0, percent, (percent * -1))
  percent_to_1 <- percent + 1

  data_select <- data %>%
    dplyr::select(c(goods, disagg)) %>%
    group_by_at(disagg) %>%
    summarise_at(.vars = goods, .funs = func) %>%
    as_tibble()

  data_select_weight <- purrr::map2(data_select[, -1], as.numeric(basket[2, ]),
                                    function(var, weight) {
                                      var * weight
                                    }) %>%
    as_tibble() %>%
    add_column(data_select[, 1], .before = 1)
  colnames(data_select_weight)[1] <- disagg

  multiply_across(data_select_weight, percent_to_1)

  return(output)
  #output2 <- rbind(output2, output)
}
############################
multiply_across <- function(data = data_select_weight, list = percent_to_1) {
  varlist <- names(data[, -1])
  for (i in varlist) {
    df1 <- data[, i]
    for (j in list) {
      df <- data
      df[, i] <- round(df1 * j, 2)
      df <- mutate(df, total = round(rowSums(df[, -1]), 2)) %>%
        mutate(type = paste0(i, " BY ", (as.numeric(j) - 1) * 100, "% OVER ", disagg)) %>%
        print(df)
      #output <- bind_rows(output, df)
      #output[[j]] <- df[[j]]
    }
  }
}
##############################################################################################
sens_analysis(data, goods, weight, disagg, percent, func)
The expected result, if you just run the code straight up, is a bunch of printed tibbles that aren't stored in any object. Ideally, for future analysis on the data and ease of use, a table of the outputs appended together would be best.
So I figured it out and will add my answer here in case someone else hits this issue.
I created a list within the loops and then bound those lists together.
Just focus on the bind_rows call after the for loops.
multiply_across <- function(data = data_select_weight,
                            list = percent_to_1) {
  varlist <- colnames(data[, -1])
  output_list <- list()
  for (i in varlist) {
    df1 <- data[, i]
    for (j in list) {
      name <- paste0(i, " BY ", (as.numeric(j) - 1) * 100, "% OVER ", disagg)
      df <- as_tibble(data)
      df[, i] <- round(df1 * j, 2)
      df <- mutate(df, total = round(rowSums(df[, -1]), 2)) %>%
        mutate(type = name)
      # move the type column to the front
      df <- df[, c(6, 1, 2, 3, 4, 5)]
      # store each variant under its descriptive name
      output_list[[name]] <- df
    }
  }
  bind_rows(lapply(output_list,
                   as.data.frame.list,
                   stringsAsFactors = FALSE))
}
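One detail to watch: sens_analysis still discards the value that the new multiply_across returns, so capture it inside sens_analysis (output <- multiply_across(data_select_weight, percent_to_1)) before return(output). With that change you can export the result; the file name below is just a placeholder:
result <- sens_analysis(data, goods, weight, disagg, percent, func)
# write the appended table out as a csv for later analysis
write.csv(result, "sens_output.csv", row.names = FALSE)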
I'm looking for some assistance in writing R code that iterates through the rows of a dataframe, passes the values in each row to a function, and prints the output either to an Excel file, a txt file, or just the console.
The purpose of this is to automate a bunch of distance/time queries (several hundred) to google maps using the function found at this website: http://www.nfactorialanalytics.com/r-vignette-for-the-week-finding-time-distance-between-two-places/
The function on that website is as follows:
library(XML)
library(RCurl)

distance2Points <- function(origin, destination) {
  results <- list()
  xml.url <- paste0('http://maps.googleapis.com/maps/api/distancematrix/xml?origins=',
                    origin, '&destinations=', destination, '&mode=driving&sensor=false')
  xmlfile <- xmlParse(getURL(xml.url))
  dist <- xmlValue(xmlChildren(xpathApply(xmlfile, "//distance")[[1]])$value)
  time <- xmlValue(xmlChildren(xpathApply(xmlfile, "//duration")[[1]])$value)
  distance <- as.numeric(sub(" km", "", dist))
  time <- as.numeric(time)/60
  distance <- distance/1000
  results[['time']] <- time
  results[['dist']] <- distance
  return(results)
}
The dataframe will contain two columns: origin postal code and destination postal code (Canada, eh?). I'm a beginner R programmer, so I know how to use read.table to load a txt file into a dataframe. I'm just not sure how to iterate through the dataframe, each time passing values to the distance2Points function and executing it. I think this can be done with either a for loop or one of the apply calls?
Thanks for the help!
edit:
To keep it simple, let's assume I want to transform these two vectors into a dataframe:
> a <- c("L5B4P2","L5B4P2")
> b <- c("M5E1E5", "A2N1T3")
> postcodetest <- data.frame(a,b)
> postcodetest
a b
1 L5B4P2 M5E1E5
2 L5B4P2 A2N1T3
How should I go about iterating over these two rows to return both distances and times from the distance2Points function?
Here's one way to do it, using lapply to produce a list with the results for each row in your data and using Reduce(rbind, [yourlist]) to concatenate that list into a data frame whose rows correspond to the ones in your original. To make this work, we also have to tweak the code in the original function to return a one-row data frame, so I've done that here.
distance2Points <- function(origin, destination) {
  require(XML)
  require(RCurl)
  xml.url <- paste0('http://maps.googleapis.com/maps/api/distancematrix/xml?origins=',
                    origin, '&destinations=', destination, '&mode=driving&sensor=false')
  xmlfile <- xmlParse(getURL(xml.url))
  dist <- xmlValue(xmlChildren(xpathApply(xmlfile, "//distance")[[1]])$value)
  time <- xmlValue(xmlChildren(xpathApply(xmlfile, "//duration")[[1]])$value)
  distance <- as.numeric(sub(" km", "", dist))
  time <- as.numeric(time)/60
  distance <- distance/1000
  # this gives you a one-row data frame instead of a list, b/c it's easy to rbind
  results <- data.frame(time = time, distance = distance)
  return(results)
}
# now apply that function rowwise to your data, using lapply, and roll the results
# into a single data frame using Reduce(rbind)
results <- Reduce(rbind, lapply(seq(nrow(postcodetest)), function(i)
  distance2Points(postcodetest$a[i], postcodetest$b[i])))
Result when applied to your sample data:
> results
time distance
1 27.06667 27.062
2 1797.80000 2369.311
If you would prefer to do this without creating a new object, you could also write separate functions for computing time and distance -- or a single function with those outputs as options -- and then use sapply or just mutate to create new columns in your original data frame. Here's how that might look using sapply:
distance2Points <- function(origin, destination, output) {
  require(XML)
  require(RCurl)
  xml.url <- paste0('http://maps.googleapis.com/maps/api/distancematrix/xml?origins=',
                    origin, '&destinations=', destination, '&mode=driving&sensor=false')
  xmlfile <- xmlParse(getURL(xml.url))
  if (output == "distance") {
    y <- xmlValue(xmlChildren(xpathApply(xmlfile, "//distance")[[1]])$value)
    y <- as.numeric(sub(" km", "", y))/1000
  } else if (output == "time") {
    y <- xmlValue(xmlChildren(xpathApply(xmlfile, "//duration")[[1]])$value)
    y <- as.numeric(y)/60
  } else {
    y <- NA
  }
  return(y)
}
postcodetest$distance <- sapply(seq(nrow(postcodetest)), function(i)
  distance2Points(postcodetest$a[i], postcodetest$b[i], "distance"))
postcodetest$time <- sapply(seq(nrow(postcodetest)), function(i)
  distance2Points(postcodetest$a[i], postcodetest$b[i], "time"))
And here's how you could do it in a dplyr pipe with mutate:
library(dplyr)
postcodetest <- postcodetest %>%
  mutate(distance = sapply(seq(nrow(postcodetest)), function(i)
           distance2Points(a[i], b[i], "distance")),
         time = sapply(seq(nrow(postcodetest)), function(i)
           distance2Points(a[i], b[i], "time")))
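A slightly more compact variant of the same idea (a sketch, using the same distance2Points as above) is mapply, which walks the two columns in parallel and avoids indexing by row number:
# mapply iterates over origin/destination pairs in parallel
postcodetest$distance <- mapply(distance2Points, postcodetest$a, postcodetest$b,
                                MoreArgs = list(output = "distance"))
postcodetest$time <- mapply(distance2Points, postcodetest$a, postcodetest$b,
                            MoreArgs = list(output = "time"))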