Does anyone know how I can make this code nicer and more efficient? I also get a message when I try to run dframe, if someone knows what's wrong there:
Column 1 ['Uke 3'] of item 2 is missing in item 1. Use fill=TRUE to
fill with NA (NULL for list columns), or use.names=FALSE to ignore
column names. use.names='check' (default from v1.12.2) emits this
message and proceeds as if use.names=FALSE for backwards
compatibility. See news item 5 in v1.12.2 for options to control this
message
library(rvest)
library(tidyverse)
library(rlist)
#URL
full_timeplan <- list(
'https://timeplan.uit.no/emne_timeplan.php?sem=22v&module%5B%5D=SOK-1005-1&week=1-20&View=list',
'https://timeplan.uit.no/emne_timeplan.php?sem=22v&module%5B%5D=SOK-1006-1&View=list',
'https://timeplan.uit.no/emne_timeplan.php?sem=22v&module%5B%5D=SOK-1016-1&View=list')
page <- full_timeplan[[1]] %>%
read_html()
table <- html_nodes(page, 'table') # one table per week
table <- html_table(table, fill=TRUE) # force them into a list
dframe <- list.stack(table) # stack the list into a data frame
# define first row as variable name
colnames(dframe) <- dframe[1,]
# remove the rows with Dato in it
dframe <- dframe %>% filter(Dato != "Dato")
# Separate the Dato into two columns:
dframe <- dframe %>% separate(Dato,
into = c("Dag", "Dato"),
sep = "(?<=[A-Za-z])(?=[0-9])")
# code into date format
dframe$Dato <- as.Date(dframe$Dato, format="%d.%m.%Y")
# generate a week variable
dframe$Uke <- strftime(dframe$Dato, format = "%V")
# select
dframe <- dframe %>% select(Dag,Dato,Uke,Tid,Rom)
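As for the data.table message: it comes from data.table's rbindlist(), which rlist::list.stack() uses under the hood. The weekly tables carry the week ("Uke 3", "Uke 4", ...) in their column names, so they cannot be matched by name. A minimal sketch of an explicit alternative to the list.stack() call above, assuming table is the list of parsed tables as in the code; binding positionally keeps the current behaviour and silences the message:
# bind the weekly tables by position, ignoring the differing column names
dframe <- data.table::rbindlist(table, use.names = FALSE) %>% as.data.frame()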
I tried to extract multiple tables with html_table from the rvest package in R, using this script:
library(rvest)
library(dplyr)
library(xml2)
library(tidyverse)
jump <- seq(1, 2, by = 1)
urls <- paste('https://asbdavani.org/horse/foals/', jump, sep="")
out <- vector("character", length = length(urls))
for(i in seq_along(urls)){
derby <- read_html(urls[i], encoding="UTF-8")
out[[i]] <- derby %>%
html_table(fill = TRUE)
}
first_table <- out[[1]]
Here, I extracted the tables of the first page as first_table. I want to know how I can get the link behind each name in columns 2, 6, and 7.
This is a tricky problem, as you need to select specific columns within a table. The CSS selector "nth-child" provides that ability.
The code below demonstrates the solution on just one table from one page in order to simplify the explanation. It should be relatively easy to copy and paste into your code.
#Read the page
url <- "https://asbdavani.org/horse/foals/6404"
page <- read_html(url)
#extract the tables from the page
tables <- page %>% html_elements("table")
#In this case we are looking at the second table
#extract each row of the table
rows <- tables[2] %>% html_elements("tr")
#remove the first row since that is the heading
#get the 2nd column from each row
#and parse the "a" html tag from the 2nd column
#retrieve the href link
col2Links <- rows[-1] %>% html_element("td:nth-child(2) a") %>% html_attr("href")
#repeat for columns 6 & 7
col6Links <- rows[-1] %>% html_element("td:nth-child(6) a") %>% html_attr("href")
col7Links <- rows[-1] %>% html_element("td:nth-child(7) a") %>% html_attr("href")
#will need to paste0 "https://asbdavani.org" onto each link.
#col2Links %>% paste0("https://asbdavani.org", .)
#make data.frame
answer <- data.frame(col2Links, col6Links, col7Links)
answer
col2Links col6Links col7Links
1 /horse/performance/13993 /horse/performance/6404 <NA>
2 /horse/performance/13873 /horse/performance/6404 <NA>
3 /horse/performance/533 /horse/performance/6404 /horse/performance/10958
4 /horse/performance/5277 /horse/performance/6404 /horse/performance/11051
5 /horse/performance/5461 /horse/performance/6404 /horse/performance/11049
6 /horse/performance/5602 /horse/performance/6404 /horse/performance/11084
7 /horse/performance/6466 /horse/performance/6404 /horse/performance/11097
8 /horse/performance/11004 /horse/performance/6404 /horse/performance/10994
9 /horse/performance/11113 /horse/performance/6404 /horse/performance/11097
10 /horse/performance/11114 /horse/performance/6404 /horse/performance/11097
11 /horse/performance/11126 /horse/performance/6404 /horse/performance/11119
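If you also want the links next to the parsed table, a possible follow-up (my addition, not part of the original answer; the link2 column name is just an example) is to parse the same table with html_table and bind a link column on, assuming the header is the table's first row so the row counts line up:
library(dplyr)
#parse the same table and attach the absolute link for column 2;
#rows without a link stay NA instead of getting a broken URL
tbl <- tables[2] %>% html_table(fill = TRUE) %>% .[[1]]
tbl <- tbl %>% mutate(link2 = ifelse(is.na(col2Links), NA, paste0("https://asbdavani.org", col2Links)))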
This is a rather untidy implementation, but it works in principle. You can surely make it more concise, and I did not relocate and rename the columns to match your exact example. Note that the matrix reshape assumes every row contains exactly three links; rows where a link is missing (the <NA> entries in the output above) would shift the columns.
library(rvest)
library(dplyr)
library(xml2)
library(tidyverse)
jump <- seq(1, 2, by = 1)
urls <- paste('https://asbdavani.org/horse/foals/', jump, sep="")
out <- data.frame()
for(i in seq_along(urls)) {
html <- read_html(urls[i], encoding = "UTF-8")
derby <-
html %>% html_elements("td") %>%
html_children() %>%
html_attr('href')
links <-
matrix(derby,
nrow = length(derby) / 3,
ncol = 3,
byrow = TRUE) %>% as.data.frame()
combined <- html %>%
html_table(fill = TRUE) %>% bind_cols(., tibble(
اسب = links$V1, # "horse"
سیلمی = links$V2, # "sire"
مادیان = links$V3 # "mare"
))
out <- bind_rows(out, combined)
}
I have two columns, "name" and "link", which give the name of the firm and the link to scrape from. How do I put back the name column so that each row of scraped data has a column with the corresponding firm name?
List.Of.Tabs <- map(pages, ~ {
name <- .x[1]
link <- .x[2]
webpage <- read_html(link)
tbls <- html_nodes(webpage, "table")
tbls_ls <- html_table(tbls,fill = TRUE)
pos1 <- possibly(function(tbls) bind_rows(tbls) %>%
filter_all(any_vars(. %in% c("Ireland", "Japan")))
, otherwise = NA)
pos1(tbls_ls)
})
The results should look something like this:
results <- data.frame(subsidiaries = c('Microsoft Japan','Microsoft Ireland'),
country = c('Japan', 'Ireland'),
name = c('Microsoft','Microsoft'))
Based on the code, we may need to mutate, i.e. create a 'name' column from the 'name' already captured in the first line of the function:
List.Of.Tabs <- map(pages, ~ {
name <- .x[1]
link <- .x[2]
webpage <- read_html(link)
tbls <- html_nodes(webpage, "table")
tbls_ls <- html_table(tbls,fill = TRUE)
pos1 <- possibly(function(tbls) bind_rows(tbls) %>%
filter_all(any_vars(. %in% c("Ireland", "Japan"))) %>%
mutate(name = name) # changed here
, otherwise = NA)
pos1(tbls_ls)
})
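To get from List.Of.Tabs to the results data frame shown in the question, drop the elements where possibly() fell back to NA and row-bind the rest — a minimal sketch:
library(purrr)
library(dplyr)
# keep only the elements that are data frames (possibly() returns NA on failure)
results <- List.Of.Tabs %>% keep(is.data.frame) %>% bind_rows()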
So I'm trying to make a data table of some information on a website. This is what I've done so far.
library(rvest)
url <- 'https://uws-community.symplicity.com/index.php?s=student_group'
page <- html_session(url)
name_nodes <- html_nodes(page,".grpl-name a")
name_text <- html_text(name_nodes)
df <- data.frame(matrix(unlist(name_text)), stringsAsFactors = FALSE)
library(tidyverse)
df <- df %>% mutate(id = row_number())
desc_nodes <- html_nodes(page, ".grpl-purpose")
desc_text <- html_text(desc_nodes)
df <- left_join(df, data.frame(matrix(unlist(desc_text)),
stringsAsFactors = FALSE) %>%
mutate(id = row_number()))
email_nodes <- html_nodes(page, ".grpl-contact a")
email_text <- html_text(email_nodes)
df <- left_join(df, data.frame(matrix(unlist(email_text)),
stringsAsFactors = FALSE) %>%
mutate(id = row_number()))
This was working until I got to the emails part. A few of the entries do not have emails, but instead of those rows showing NA for the email, the last three rows show NA.
How do I make it so the appropriate rows have the NA value instead of just the last three rows?
The key to solving this problem is to find the 20 parent nodes, one per student group, which are known to exist. With this list of parent nodes, use the html_node function on each parent node. The html_node function returns one result or NA, depending on whether the desired tag exists. I would recommend this technique any time there is a variable number of sub-nodes.
library(rvest)
library(dplyr)
url <- 'https://uws-community.symplicity.com/index.php?s=student_group'
page <- html_session(url)
#find group names
name_text <- html_nodes(page,".grpl-name a") %>% html_text()
df <- data.frame(name_text, stringsAsFactors = FALSE)
df <- df %>% mutate(id = row_number())
#find text description
desc_text <- html_nodes(page, ".grpl-purpose") %>% html_text()
df$desc_text <- trimws(desc_text)
#find emails
# find the parent nodes with html_nodes
# then find the contact information from each parent using html_node
email_nodes <- html_nodes(page, "div.grpl-grp") %>% html_node(".grpl-contact a") %>% html_text()
df$emails <- email_nodes
I also took the opportunity to simplify your code: since the lists are all 20 elements long, there is no reason for the unlist/matrix/mutate calls to add the additional columns onto the data frame.
I have a dataframe uuu_df whose records are links of a website:
dim(uuu_df)
# output: 1950 1
uuu_df
1) http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=1&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=5-Lacs&BudgetMax=5-Lacs
2) http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=2&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=5-Lacs&BudgetMax=5-Lacs
3) http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=3&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=5-Lacs&BudgetMax=5-Lacs
.
.
.
1950) http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=>5&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=20-Crores&BudgetMax=20-Crores
Here I'm trying to scrape data using those multiple links from the dataframe, with the condition that if the text of the html attribute equals "No Results Found!" that record is skipped and we move on to the next one.
This is the snippet of that scraping:
UrlPage <- html("http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=2&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=5-Lacs&BudgetMax=5-Lacs")
ImgNode <- UrlPage %>% html_node("div.noResultHead")
u=ImgNode
u=as(u,"character")
u=paste("No",word(string = u, start = 4, end = 5),sep = " ")
Here is what I have tried:
wines=data.frame()
url_test=c()
UrlPage_test=c()
u=c()
ImgNode=c()
for(i in 1:dim(uuu_df)[1]){
url_test[i]=as.character(uuu_df[i,])
UrlPage_test[i] <- html(url_test[i])
ImgNode[i] <- UrlPage_test[i] %>% html_node("div.noResultHead")
u[i]=ImgNode[i]
u[i]=as(u[i],"character")
u[i]=paste("No",word(string = u, start = 4, end = 5),sep = " ")
if(u[i]=="No Results Found!") next
{
map_df(1:5, function(i) # here 1:5 is number of webpages of a website
{
# simple but effective progress indicator
cat(".")
pg <- read_html(sprintf(url_test, i))
data.frame(wine=html_text(html_nodes(pg, ".agentNameh")),
excerpt=html_text(html_nodes(pg, ".postedOn")),
locality=html_text(html_nodes(pg,".localityFirst")),
society=html_text(html_nodes(pg,'.labValu .stop-propagation:nth-child(1)')),
stringsAsFactors=FALSE)
}) -> wines
}
But the wines dataframe comes back empty, with no rows and no columns.
Why is it not able to append rows inside it?
Any suggestion will be helpful. Thanks in advance.
P.S.: dput() of reproducible data:
text1="http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom="
text2="1"
text3="&proptype="
text4="Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment"
text5="&cityName=Thane&BudgetMin="
text6="&BudgetMax="
bhk=c("1","2","3","4","5",">5")
budg_min=c("5-Lacs","10-Lacs","20-Lacs","30-Lacs","40-Lacs","50-Lacs","60-Lacs","70-Lacs","80-Lacs","90-Lacs","1-Crores","1.2-Crores","1.4-Crores","1.6-Crores","1.8-Crores","2-Crores","2.3-Crores","2.6-Crores","3-Crores","3.5-Crores","4-Crores","4.5-Crores","5-Crores","10-Crores","20-Crores")
budg_max=c("5-Lacs","10-Lacs","20-Lacs","30-Lacs","40-Lacs","50-Lacs","60-Lacs","70-Lacs","80-Lacs","90-Lacs","1-Crores","1.2-Crores","1.4-Crores","1.6-Crores","1.8-Crores","2-Crores","2.3-Crores","2.6-Crores","3-Crores","3.5-Crores","4-Crores","4.5-Crores","5-Crores","10-Crores","20-Crores")
eg <- expand.grid(bhk = bhk, budg_min = budg_min, budg_max = budg_max)
eg <- eg[as.integer(eg$budg_min) <= as.integer(eg$budg_max),]
uuu <- sprintf("%s%s%s%s%s%s%s%s", text1,eg[,1],text3,text4,text5,eg[,2],text6,eg[,3])
uuu_df=data.frame(Links=uuu)
dput(uuu_df)
You should take advantage of the document tree to consistently find the elements you need and to control the flow of the loop or vectorized function. In the example below I check the result count to determine whether there are any results, then parse each card node individually so the columns stay aligned. Finally, you can bind the rows if needed.
Side note: llply has the .progress argument, which handles the progress indicator you were trying to devise with cat() more elegantly.
options(stringsAsFactors = FALSE)
library(plyr)
library(dplyr)
library(xml2)
uuu_df <- data.frame(x = c('http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=1&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=5-Lacs&BudgetMax=5-Lacs',
'http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=2&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=5-Lacs&BudgetMax=5-Lacs',
'http://www.magicbricks.com/property-for-sale/residential-real-estate?bedroom=3&proptype=Multistorey-Apartment,Builder-Floor-Apartment,Penthouse,Studio-Apartment&cityName=Thane&BudgetMin=5-Lacs&BudgetMax=90-Lacs'))
urlList <- llply(uuu_df[,1], function(url){
this_pg <- read_html(url)
results_count <- this_pg %>%
xml_find_first(".//span[#id='resultCount']") %>%
xml_text() %>%
as.integer()
if(results_count > 0){
cards <- this_pg %>%
xml_find_all('//div[@class="SRCard"]')
df <- ldply(cards, .fun=function(x){
y <- data.frame(wine = x %>% xml_find_first('.//span[@class="agentNameh"]') %>% xml_text(),
excerpt = x %>% xml_find_first('.//div[@class="postedOn"]') %>% xml_text(),
locality = x %>% xml_find_first('.//span[@class="localityFirst"]') %>% xml_text(),
society = x %>% xml_find_first('.//div[@class="labValu"]') %>% xml_text() %>% gsub('\\n', '', .))
return(y)
})
} else {
df <- NULL
}
return(df)
}, .progress = 'text')
names(urlList) <- uuu_df[,1]
bind_rows(urlList)
Consider working with one large list built using lapply that iterates through the url column of the dataframe, instead of managing many smaller vectors:
library(rvest)
library(stringr) # for word()
library(purrr)   # for map_df() below
urlList <- lapply(uuu_df[,1], function(url){
UrlPage <- read_html(as.character(url)) # read each page once
ImgNode <- UrlPage %>% html_node("div.noResultHead")
u <- paste("No", word(string = as(ImgNode, "character"), start=4, end=5), sep=" ")
cat(".") # simple progress indicator
pg <- UrlPage # reuse the already-parsed page instead of downloading it again
if(u!="No Results Found!") {
df <- data.frame(wine=html_text(html_nodes(pg, ".agentNameh")),
excerpt=html_text(html_nodes(pg, ".postedOn")),
locality=html_text(html_nodes(pg,".localityFirst")),
society=html_text(html_nodes(pg,'.labValu .stop-propagation:nth-child(1)')),
stringsAsFactors=FALSE)
} else {
# ASSIGN EMPTY DATAFRAME (FOR CONSISTENT STRUCTURE)
df <- data.frame(wine=c(), excerpt=c(), locality=c(), society=c())
}
# RETURN NAMED LIST
return(list(UrlPage=UrlPage, ImgNode=ImgNode, u=u, df=df))
})
# ROW BIND ONLY DATAFRAME ELEMENT FROM LIST
wines <- map_df(urlList, function(u) u$df)