I want to extract the data in the "Completed Games" table located here "https://www.chess.com/member/magnuscarlsen".
The code below gives me a list of size 0. The Selenium side of things seems to be working: a Firefox browser opens on my desktop and navigates to the page. Any help would be greatly appreciated. I'm at my wit's end!
rD <- rsDriver(browser="firefox", port=4442L, verbose=F)
remDr <- rD[["client"]]
remDr$navigate("https://www.chess.com/member/magnuscarlsen")
Sys.sleep(5) # give the page time to fully load
html <- remDr$getPageSource()[[1]]
html <- read_html(html)
signal <- html %>%
html_nodes("table.table-component table-hover archived-games-table")
If you don't mind missing the accuracy figures (for which I believe there is no published basis of calculation), have a look at the public APIs from Chess.com. You do get all the move information included.
In particular, see the implementations in the bigchess package. I amended examples from there below:
All games:
library(rjson)
library(bigchess)
user <- "magnuscarlsen"
json_file <- paste0("https://api.chess.com/pub/player/", user,"/games/archives")
json_data <- fromJSON(paste(readLines(json_file), collapse = ""))
result <- data.frame()
for(i in json_data$archives)
result <- rbind(result, read.pgn(paste0(i, "/pgn")))
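If you prefer to avoid growing a data frame inside a loop with rbind, a minimal alternative sketch (assuming the same json_data$archives vector as above) binds the monthly archives in one call with purrr::map_dfr:
library(purrr)
# Read each monthly archive's PGN and row-bind the resulting data frames
result <- map_dfr(json_data$archives, ~ read.pgn(paste0(.x, "/pgn")))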
Single month:
library(bigchess)
df <- read.pgn("https://api.chess.com/pub/player/magnuscarlsen/games/2020/12/pgn")
print(df[df$Date == '2020.12.11', ])
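As a quick follow-up sketch, the Result column that read.pgn returns lets you tally that day's outcomes:
games_dec11 <- df[df$Date == '2020.12.11', ]
# Count wins/draws/losses by PGN result tag ("1-0", "0-1", "1/2-1/2")
table(games_dec11$Result)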
Adding in your accuracies as requested. Most of the info on that page is actually available via the APIs:
library(bigchess)
#> Warning: package 'bigchess' was built under R version 4.0.3
library(purrr)
library(jsonlite)
#> Warning: package 'jsonlite' was built under R version 4.0.3
#>
#> Attaching package: 'jsonlite'
#> The following object is masked from 'package:purrr':
#>
#> flatten
library(stringr)
try_again <- function(link) { #https://blog.r-hub.io/2020/04/07/retry-wheel/
maxtry <- 5
try <- 1
resp <- read_json(link)
while (try <= maxtry && is.null(resp$data)) {
resp <- read_json(link)
try <- try + 1
Sys.sleep(try * .25)
}
return(resp)
}
url <- "https://api.chess.com/pub/player/magnuscarlsen/games/2020/12"
result <- data.frame()
result <- read.pgn(paste0(url, "/pgn"))
#> Warning in readLines(con): incomplete final line found on 'https://
#> api.chess.com/pub/player/magnuscarlsen/games/2020/12/pgn'
#> 2021-02-15 20:29:04, successfully imported 47 games
#> 2021-02-15 20:29:04, N moves computed
#> 2021-02-15 20:29:04, extract moves done
#> 2021-02-15 20:29:04, stat moves computed
result <- dplyr::filter(result, Date == "2020.12.11")
data <- read_json(url)
mask <- map(data$games, ~ !is.na(str_match(.x$pgn, 'UTCDate\\s\\"2020\\.12\\.11')[, 1])) %>% unlist()
games <- data$games[mask]
games <- paste0("https://www.chess.com/callback/analysis/game/live/", map(games, ~ str_match(.x$url, "\\d+")[, 1]), "/all")
df <- map_df(games, ~ {
json_data <- try_again(.x)
tryCatch(
data.frame(
Url = .x,
WhiteAccuracy = json_data$data$analysis$CAPS$white$all,
BlackAccuracy = json_data$data$analysis$CAPS$black$all,
stringsAsFactors = FALSE
),
error = function(e) {
data.frame(
Url = .x,
WhiteAccuracy = NA_integer_,
BlackAccuracy = NA_integer_,
stringsAsFactors = FALSE
)
}
)
})
final <- cbind(result, df)
#> Error in .cbind.ts(list(...), .makeNamesTs(...), dframe = FALSE, union = TRUE): non-time series not of the correct length
Created on 2021-02-15 by the reprex package (v0.3.0)
Here is an approach that solves your problem easily, because the page itself has just one table: use rvest's html_table() to pull it out. Note that I use pipes because I prefer them; you can of course do without them.
library(RSelenium)
library(rvest)
rD <- rsDriver(browser="firefox", port=4443L, verbose=F)
remDr <- rD[["client"]]
remDr$navigate("https://www.chess.com/member/magnuscarlsen")
Sys.sleep(5) # give the page time to fully load
html <- remDr$getPageSource()[[1]]
html <- read_html(html)
##required table
html %>% html_table() %>% .[[1]]
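When you are done, it is usually worth closing the browser and stopping the Selenium server so the port is freed again; a minimal sketch:
# Tidy up the RSelenium session
remDr$close()
rD$server$stop()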
I am running into an error while trying to make a corpus object with the tm package in R.
The data have been scraped from a website, and I have included the full code below so you can run it and see how the data were gathered and the tibble was created. The very last line of code is where I am getting stuck! (I have modified the loop so it should run in a few seconds.)
Any help would be appreciated. :)
library(tidyverse)
library(rvest)
##########################################
# WEB SCRAPING FROM SCHOLARLYKITCHEN.COM #
##########################################
# create a loop that iteratively adds page numbers onto the base URL
# keep the loop numbers small for testing before full data is pulled in
output <- character()
for (i in 1:2) {
article.links <- paste0("https://scholarlykitchen.sspnet.org/archives/page/", i ,"/") %>%
read_html() %>%
html_nodes(".list-article__title") %>%
html_nodes("a") %>%
html_attr("href")
output <- c(output, article.links)
}
# get all comments
get.comments <- function(output) {
article.page <- read_html(output)
article.comments <- article.page %>% html_nodes(".comment") %>% html_text() %>% trimws(which = "both")
return(article.comments)
}
text <- sapply(output, FUN = get.comments, USE.NAMES = FALSE)
# get all dates
get.dates <- function(output) {
article.page <- read_html(output)
article.comments <- article.page %>% html_nodes(".comment__meta__date") %>% html_text() %>% trimws(which = "both")
return(article.comments)
}
dates <- sapply(output, FUN = get.dates, USE.NAMES = FALSE)
# create the main df for the analysis
df <- tibble(
text = unlist(text, recursive = TRUE), # unlist is needed because sapply (for some reason) creates a list
dates = unlist(dates, recursive = TRUE)
)
# extract dates from meta data
df$dates <- as.character(gsub(",","",df$dates))
df$dates <- as.Date(df$dates, "%B%d%Y")
###################
# TOPIC MODELLING #
###################
library(tm)
library(topicmodels)
# create df ready for topic modelling
# this needs to have very specifically named columns
df.tm <- df[-2] # create duplicate for backup (dates not needed for topic modelling yet)
df.tm$doc_id <- row.names(df) # create a unique id for each row as is needed by the tm package
df.tm <- df.tm[c(2,1)] # reorders the columns
# From the comments text, create the corpus
corpus <- VCorpus(DataframeSource(df))
Error is the below
Error in DataframeSource(df) :
all(!is.na(match(c("doc_id", "text"), names(x)))) is not TRUE
DataframeSource() requires the df to have a document index in its first column, and it must be labeled "doc_id".
Try:
df_with_id <- tibble::rowid_to_column(df, var = "doc_id") # Alternatively, generate a doc index that better represents your collection of documents.
corpus <- VCorpus(DataframeSource(df_with_id))
<<VCorpus>>
Metadata: corpus specific: 0, document level (indexed): 1
Content: documents: 141
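For reference, a minimal self-contained sketch (toy data, hypothetical values) of the column layout DataframeSource() expects, with doc_id as a character column in first position:
library(tm)
# Toy data frame: doc_id first, then text; any further columns become document metadata
toy <- data.frame(
  doc_id = as.character(1:3),
  text   = c("first comment", "second comment", "third comment"),
  stringsAsFactors = FALSE
)
toy_corpus <- VCorpus(DataframeSource(toy))
inspect(toy_corpus)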
I am trying to get information from Artsy using the rvest package in R. I want to get the name of the painting, year, price, place (name of gallery, auction, etc.), name of the artist, and the materials that are used. The information on materials is provided on the inside page of each painting. The code that I tried is provided below:
library(rvest)
library(dplyr)
library(tidyverse)
get_material = function (painting_link) {
painting_page = read_html (painting_link)
material = painting_page %>% html_nodes('h2+ .kPqROo') %>%
html_text() %>% paste(collapse = ",")
return(material)
}
for(page_result in 2:3) {
link = paste0 ("https://www.artsy.net/collect?page=", page_result, "&additional_gene_ids%5B0%5D=painting")
page = read_html(link)
painting_name_year = page %>% html_nodes("#main .kjRHrZ") %>% html_text()
painting_link = page %>% html_nodes('#main .kjRHrZ') %>% html_attr("<div color="black60" font-family="sans" class="Box-sc-15se88d-0 Text-sc-18gcpao-0 kjRHrZ">\n<i>") %>% paste("https://www.artsy.net", ., sep="/")
price = page %>% html_nodes('.ibabyz') %>% html_text()
place = page %>% html_nodes('hWKLzd') %>% html_text()
artist = page %>% html_nodes('.bQOCym .bQOCym') %>% html_text()
material = sapply(painting_link, FUN=get_material, USE.NAMES = FALSE)
}
artsy <- data.frame(painting_name_year, price, place, artist)
view(artsy)
The code for painting_link, place, and material is not working. Moreover, one observation is repeated 3 times. How can I fix this problem?
You can remove the loop. First generate the list of start URLs. Then, rather than scraping some info from the landing pages before visiting the individual listing pages, gather all the URLs of the individual listings first.
You can then gain a little efficiency by working across more CPU cores, gathering the data you want from all the listings via a function call against each URL.
N.B. As this operation is I/O bound, you would likely see better efficiency with an asynchronous method. If I can find a decent tutorial/reference on this, I may update this answer.
If the function returns a tibble of the desired info for each listing URL, you can generate the final dataframe by calling future_map_dfr on the listing links and the user-defined function.
library(purrr)
library(rvest)
#> Loading required package: xml2
#> Warning: package 'xml2' was built under R version 4.0.3
#>
#> Attaching package: 'rvest'
#> The following object is masked from 'package:purrr':
#>
#> pluck
library(tidyverse)
#> Warning: package 'tibble' was built under R version 4.0.3
#> Warning: package 'forcats' was built under R version 4.0.3
library(jsonlite)
#> Warning: package 'jsonlite' was built under R version 4.0.3
#>
#> Attaching package: 'jsonlite'
#> The following object is masked from 'package:purrr':
#>
#> flatten
library(furrr)
#> Warning: package 'furrr' was built under R version 4.0.3
#> Loading required package: future
#> Warning: package 'future' was built under R version 4.0.3
library(stringr)
get_art_links <- function(link) {
hrefs <- read_html(link) %>%
html_nodes("[href*=artwork][class]") %>%
html_attr("href") %>%
paste0("https://www.artsy.net", .)
return(hrefs)
}
get_listing_json <- function(page) {
data <- page %>%
html_node('[type="application/ld+json"]') %>%
html_text() %>%
jsonlite::parse_json()
return(data)
}
get_listing_info <- function(link) {
page <- read_html(link)
json <- get_listing_json(page)
artist <- json$brand$name
title <- page %>%
html_node('[data-test="artworkSidebar"] h2 > i') %>%
html_text()
production_date <- json$productionDate
material <- page %>%
html_node('[data-test="artworkSidebar"] h2 + div') %>%
html_text()
width <- json$width
height <- json$height
place <- stringr::str_match(json$description, "from (.*?),")[, 2]
price <- json$offers$price
currency <- json$offers$priceCurrency
availability <- str_replace(json$offers$availability, "https://schema.org/", "")
return(tibble(artist, title, production_date, material, width, height, place, price, currency, availability))
}
pages <- 2:3 %>% as.character()
urls <- sprintf("https://www.artsy.net/collect?page=%s&additional_gene_ids[0]=painting", pages)
links <- purrr::map(urls, get_art_links) %>%
unlist()
no_cores <- future::availableCores() - 1
future::plan(future::multisession, workers = no_cores)
results <- future_map_dfr(links, .f = get_listing_info)
Created on 2021-05-16 by the reprex package (v0.3.0)
I am trying to scrape Bangladesh COVID-19 data (number of tests, number of positive tests, positive rate) from the official website: http://103.247.238.92/webportal/pages/covid19.php
The website contains 3 drop-down menus to arrive at the data: Select Division; Select District; Select time frame for the data.
I have tried the following so far:
url <- "http://103.247.238.92/webportal/pages/covid19.php"
webpage <- read_html(url)
webpage has the following:
List of 2
$ node:<externalptr>
$ doc :<externalptr>
- attr(*, "class")= chr [1:2] "xml_document" "xml_node"
Since this did not help, I also tried the following based on this question:
a <- GET(url)
a <- content(a, as="text")
a <- gsub("^angular.callbacks._2\\(", "", a)
a <- gsub("\\);$", "", a)
df <- fromJSON(a, simplifyDataFrame = TRUE)
The above returns the following error:
Error: lexical error: invalid char in json text.
<!DOCTYPE html> <!-- This is a
(right here) ------^
So I am really lost in terms of how I can even read the data - but upon looking at the source of the webpage, I know that the data is right there (see the Safari Web Inspector screenshot).
Any suggestions on how I can read this data?
Additionally, if someone could help with how I can go about selecting the different drop-down menu items, that would be really appreciated. The final goal is to collect data for each district in each division for the last 12 months.
tl;dr
The page makes additional requests to pick up that info. Those additional requests rely on combinations of IDs: an ID pulled from the value attribute of each option element in the Division dropdown, in tandem with an ID pulled from the value attribute of each option element in the District dropdown.
You can make an initial request to get all the Division dropdown ids:
divisions <- options_df("#division option:nth-child(n+2)", "division")
nth-child(n+2) is used to exclude the initial 'select' option.
This returns a dataframe with the initial divisionIDs and friendly division names.
Those ids can then be used to retrieve the associated districtIDs (the options which become available in the second dropdown after making your selection in the first):
districts <- pmap_dfr(
list(divisions$divisionID),
~ {
df_districts <- districts_from_updated_session(.x, "district") %>%
mutate(
divisionID = .x
)
return(df_districts)
}
)
This returns a dataframe mapping each divisionID to all the associated districtIDs, as well as the friendly district names.
By including the divisionID in both dataframes, I can inner-join them:
div_district <- dplyr::inner_join(divisions, districts, by = "divisionID", copy = FALSE)
Up until now, I have been using a session object for the efficiency of TCP re-use. Unfortunately, I couldn't find anything in the documentation covering how to update an already open session so as to send a new POST request with a dynamic body argument. Instead, I leveraged furrr::future_map to try and gain some efficiency through parallel processing:
df <- div_district %>%
mutate(json = furrr::future_map(divisionID, .f = get_covid_data, districtID))
To get the final COVID numbers via get_covid_data(), I leverage some perhaps odd behaviour of the server: I can make a GET request passing the divisionID and districtID within the body, regex out part of the jQuery DataTables scripting, clean that into a valid JSON string, and read it into a JSON object stored in the json column of the final dataframe.
R:
library(httr)
#> Warning: package 'httr' was built under R version 4.0.3
library(rvest)
#> Loading required package: xml2
#> Warning: package 'xml2' was built under R version 4.0.3
library(tidyverse)
#> Warning: package 'tibble' was built under R version 4.0.3
#> Warning: package 'forcats' was built under R version 4.0.3
library(jsonlite)
#> Warning: package 'jsonlite' was built under R version 4.0.3
#>
#> Attaching package: 'jsonlite'
#> The following object is masked from 'package:purrr':
#>
#> flatten
library(furrr)
#> Warning: package 'furrr' was built under R version 4.0.3
#> Loading required package: future
#> Warning: package 'future' was built under R version 4.0.3
## to clean out everything before a run
# rm(list = ls(all = TRUE))
# invisible(lapply(paste0('package:', names(sessionInfo()$otherPkgs)), detach, character.only=TRUE, unload=TRUE)) # https://stackoverflow.com/a/39235076 #mmfrgmpds
#returns value:text for options e.g. divisions/districts (dropdown)
options_df <- function(css_selector, level) {
nodes <- session %>% html_nodes(css_selector)
options <- nodes %>% map_df(~ c(html_attr(., "value"), html_text(.)) %>%
set_names(paste0(level, "ID"), level))
return(options)
}
#returns districts associated with division
districts_from_updated_session <- function(division_id, level) {
session <- jump_to(session, paste0("http://103.247.238.92/webportal/pages/ajaxDataDistrictDHIS2Dashboard.php?division_id=", division_id))
return(options_df("#district option:nth-child(n+2)", level))
}
# returns json object housing latest 12 month covid numbers by divisionID + districtID pairing
get_covid_data <- function(divisionID, districtID) {
headers <- c(
"user-agent" = "Mozilla/5.0",
"if-modified-since" = "Wed, 08 Jul 2020 00:00:00 GMT" # to mitigate for caching
)
data <- list("division" = divisionID, "district" = districtID, "period" = "LAST_12_MONTH", "Submit" = "Search")
r <- httr::GET(url = "http://103.247.238.92/webportal/pages/covid19.php", httr::add_headers(.headers = headers), body = data)
data <- stringr::str_match(content(r, "text"), "DataTable\\((\\[[\\s\\S]+\\])\\)")[1, 2] %>% #clean up extracted string so can be parsed as valid json
gsub("role", '"role"', .) %>%
gsub("'", '"', .) %>%
gsub(",\\s+\\]", "]", .) %>%
str_squish() %>%
jsonlite::parse_json()
return(data)
}
url <- "http://103.247.238.92/webportal/pages/covid19.php"
headers <- c("User-Agent" = "Mozilla/4.0", "Referer" = "http://103.247.238.92/webportal/pages/covid19.php")
session <- html_session(url, httr::add_headers(.headers = headers)) #for tcp re-use
divisions <- options_df("#division option:nth-child(n+2)", "division") #nth-child(n+2) to exclude initial 'select' option
districts <- pmap_dfr(
list(divisions$divisionID),
~ {
df <- districts_from_updated_session(.x, "district") %>%
mutate(
divisionID = .x
)
return(df)
}
)
div_district <- dplyr::inner_join(divisions, districts, by = "divisionID", copy = FALSE)
no_cores <- future::availableCores() - 1
future::plan(future::multisession, workers = no_cores)
df <- div_district %>%
mutate(json = future_map(divisionID, .f = get_covid_data, districtID))
Created on 2021-03-04 by the reprex package (v0.3.0)
Python:
import requests, re, ast
from bs4 import BeautifulSoup as bs

def options_dict(soup, css_selector):
    # value:text pairs for the non-empty dropdown options
    options = {i.text: i['value'] for i in soup.select(css_selector) if i['value']}
    return options

def covid_numbers(text):
    # pull the DataTable config out of the page script and parse it as a Python literal
    covid_data = p.findall(text)[0]
    covid_data = re.sub(r'\n\s+', '', covid_data.replace("role", "'role'"))
    covid_data = ast.literal_eval(covid_data)
    return covid_data

url = 'http://103.247.238.92/webportal/pages/covid19.php'
regions = {}
result = {}
p = re.compile(r'DataTable\((\[[\s\S]+\])\)')

with requests.Session() as s:
    s.headers = {'User-Agent': 'Mozilla/5.0', 'Referer': 'http://103.247.238.92/webportal/pages/covid19.php'}
    soup = bs(s.get(url).content, 'lxml')
    divisions = options_dict(soup, '#division option')

    for k, v in divisions.items():
        r = s.get(f'http://103.247.238.92/webportal/pages/ajaxDataDistrictDHIS2Dashboard.php?division_id={v}')
        soup = bs(r.content, 'lxml')
        districts = options_dict(soup, '#district option')
        regions[k] = districts

    s.headers = {'User-Agent': 'Mozilla/5.0', 'if-modified-since': 'Wed, 08 Jul 2020 22:27:07 GMT'}

    for k, v in divisions.items():
        result[k] = {}
        for k2, v2 in regions[k].items():  # district name:id pairs for this division
            data = {'division': v, 'district': v2, 'period': 'LAST_12_MONTH', 'Submit': 'Search'}
            r = s.get('http://103.247.238.92/webportal/pages/covid19.php', data=data)
            result[k][k2] = covid_numbers(r.text)
I used the following code:
library(XML)
library(RCurl)
getGoogleURL <- function(search.term, domain = '.co.uk', quotes=TRUE)
{
search.term <- gsub(' ', '%20', search.term)
if(quotes) search.term <- paste('%22', search.term, '%22', sep='')
getGoogleURL <- paste('http://www.google', domain, '/search?q=',
search.term, sep='')
}
getGoogleLinks <- function(google.url)
{
doc <- getURL(google.url, httpheader = c("User-Agent" = "R(2.10.0)"))
html <- htmlTreeParse(doc, useInternalNodes = TRUE, error=function(...){})
nodes <- getNodeSet(html, "//a[@href][@class='l']")
return(sapply(nodes, function(x) x <- xmlAttrs(x)[[1]]))
}
search.term <- "cran"
quotes <- "FALSE"
search.url <- getGoogleURL(search.term=search.term, quotes=quotes)
links <- getGoogleLinks(search.url)
I would like to find all the links that resulted from my search, but I get the following result:
> links
list()
How can I get the links?
In addition, I would like to get the headlines and summaries of the Google results. How can I get those?
And finally, is there a way to get the links that reside in ChillingEffects.org results?
If you look at the html variable, you can see that the search result links are all nested in <h3 class="r"> tags.
Try to change your getGoogleLinks function to:
getGoogleLinks <- function(google.url) {
  doc <- getURL(google.url, httpheader = c("User-Agent" = "R (2.10.0)"))
  html <- htmlTreeParse(doc, useInternalNodes = TRUE, error = function(...) {})
  nodes <- getNodeSet(html, "//h3[@class='r']//a")
  return(sapply(nodes, function(x) x <- xmlAttrs(x)[["href"]]))
}
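A quick usage sketch, assuming the getGoogleURL() helper from the question:
search.url <- getGoogleURL(search.term = "cran", quotes = FALSE)
links <- getGoogleLinks(search.url)
head(links)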
I created this function to read in a list of company names and then get the top website result for each. It will get you started; you can then adjust it as needed.
#libraries.
library(URLencode)
library(rvest)
#load data
d <-read.csv("P:\\needWebsites.csv")
c <- as.character(d$Company.Name)
# Function for getting website.
getWebsite <- function(name)
{
url = URLencode(paste0("https://www.google.com/search?q=",name))
page <- read_html(url)
results <- page %>%
html_nodes("cite") %>% # Get all notes of type cite. You can change this to grab other node types.
html_text()
result <- results[1]
return(as.character(result)) # Return results if you want to see them all.
}
# Apply the function to a list of company names.
websites <- data.frame(Website = sapply(c, getWebsite))
The other solutions here don't work for me. Here's my take on @Bryce-Chamberlain's answer, which works for me in August 2019. It also answers another closed question: company name to URL in R.
# install.packages("rvest")
get_first_google_link <- function(name, root = TRUE) {
url = URLencode(paste0("https://www.google.com/search?q=",name))
page <- xml2::read_html(url)
# extract all links
nodes <- rvest::html_nodes(page, "a")
links <- rvest::html_attr(nodes,"href")
# extract first link of the search results
link <- links[startsWith(links, "/url?q=")][1]
# clean it
link <- sub("^/url\\?q\\=(.*?)\\&sa.*$","\\1", link)
# get root if relevant
if(root) link <- sub("^(https?://.*?/).*$", "\\1", link)
link
}
companies <- data.frame(company = c("apple acres llc","abbvie inc","apple inc"))
companies <- transform(companies, url = sapply(company,get_first_google_link))
companies
#> company url
#> 1 apple acres llc https://www.appleacresllc.com/
#> 2 abbvie inc https://www.abbvie.com/
#> 3 apple inc https://www.apple.com/
Created on 2019-08-10 by the reprex package (v0.2.1)
The free solutions don't work anymore, and they don't allow you to search for regions outside your location. Here's a solution using the Google Custom Search API. The API allows 100 free calls per day. The function below returns only 10 results, i.e. page 1; one API call returns only 10 results.
library(dplyr) # for %>%, mutate, rename, select, row_number
Google.Search.API <- function(keyword, google.key, google.cx, country = "us")
{
# keyword = keywords[10]; country = "us"
url <- paste0("https://www.googleapis.com/customsearch/v1?"
, "key=", google.key
, "&q=", gsub(" ", "+", keyword)
, "&gl=", country # Country
, "&hl=en" # Language from Browser, english
, "&cx=", google.cx
, "&fields=items(link)"
)
d2 <- url %>%
httr::GET(ssl.verifypeer=TRUE) %>%
httr::content(.) %>% .[["items"]] %>%
data.table::rbindlist(.) %>%
mutate(keyword, SERP = row_number(), search.engine = "Google API") %>%
rename(source = link) %>%
select(search.engine, keyword, SERP, source)
pause <- round(runif(1, min = 1.1, max = 5), 1)
if(nrow(d2) == 0)
{cat("\nPausing", pause, "seconds. Failed for:", keyword)} else
{cat("\nPausing", pause, "seconds. Successful for:", keyword)}
Sys.sleep(pause)
rm(keyword, country, pause, url, google.key, google.cx)
return(d2)
}
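A hedged usage sketch; the key and cx values below are placeholders you would replace with your own Google API key and Custom Search Engine ID:
# Hypothetical call: substitute your own credentials
serp <- Google.Search.API(keyword = "cran", google.key = "YOUR_API_KEY", google.cx = "YOUR_SEARCH_ENGINE_ID", country = "us")
serp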
I have a file with multiple HTML links in it, and I now want to use dplyr and rvest to get the link to the image for every URL in each row.
When I do it manually it works fine and returns the result, but when the same code is called within a function it fails with the following error:
Error: no applicable method for 'xml_find_all' applied to an object of
class "factor"
I don't know what I'm doing wrong. Any help is appreciated. In order to make my question clearer, I have added (in comments) a few example rows and also shown the manual approach.
library(rvest)
library(dplyr)
library(httr) # contains function stop_for_status()
#get html links from file
#EXAMPLE
# "_id",url
# 560fc55c65818bee0b77ec33,http://www.seriouseats.com/recipes/2011/01/sriracha-ceviche-recipe.html
# 560fc57e65818bee0b78d8b7,http://www.seriouseats.com/recipes/2008/07/pasta-arugula-tomatoes-recipe.html
# 560fc57e65818bee0b78dcde,http://www.seriouseats.com/recipes/2007/08/cook-the-book-minty-boozy-chic.html
# 560fc57e65818bee0b78de93,http://www.seriouseats.com/recipes/2010/02/chipped-beef-gravy-on-toast-stew-on-a-shingle-recipe.html
# 560fc57e65818bee0b78dfe6,http://www.seriouseats.com/recipes/2011/05/dinner-tonight-quinoa-salad-with-lemon-cream.html
# 560fc58165818bee0b78e65e,http://www.seriouseats.com/recipes/2010/10/dinner-tonight-spicy-quinoa-salad-recipe.html
#
#load into SE
#
SE <- read.csv("~/Desktop/SeriousEats.csv")
#
#function to retrieve imgPath per URL
#using rvest
#
getImgPath <- function(x) {
imgPath <- x %>% html_nodes(".photo") %>% html_attr("src")
stop_for_status(res)
return(imgPath)
}
#This works fine
#UrlPage <- read_html ("http://www.seriouseats.com/recipes/2011/01/sriracha-ceviche-recipe.html")
#imgPath <- UrlPage %>% html_nodes(".photo") %>% html_attr("src")
#
#This throws an error msg
#
S <- mutate(SE, imgPath = getImgPath(SE$url))
This works:
library(rvest)
library(dplyr)
# SE <- data_frame(url = c(
# "http://www.seriouseats.com/recipes/2011/01/sriracha-ceviche-recipe.html",
# "http://www.seriouseats.com/recipes/2008/07/pasta-arugula-tomatoes-recipe.html"
# ))
SE <- read.csv('/path/to/SeriousEats.csv', stringsAsFactors = FALSE)
getImgPath <- function(x) {
# x must be "a document, a node set or a single node" per rvest documentation; cannot be a factor or character
imgPath <- read_html(x) %>% html_nodes(".photo") %>% html_attr("src")
# httr::stop_for_status(res) OP said this is not necessary, so I removed
return(imgPath)
}
S <- SE %>%
rowwise() %>%
mutate(imgPath = getImgPath(url))
Thanks for the help and patience, @Jubbles. For the benefit of others, here is the complete answer.
library(rvest)
library(dplyr)
library(RCurl) # for url.exists()
SE <- read.csv("~/Desktop/FILE.txt", stringsAsFactors = FALSE)
getImgPath <- function(x) {
if (try(url.exists(x))) {
imgPath <- read_html(x) %>%
html_nodes(".photo") %>%
html_attr("src")
}
else {
imgPath = "NA"
}
#imgPath
return(imgPath)
}
SE1 <- SE %>%
rowwise() %>%
mutate(imgPath = getImgPath(url))