Could someone help me? This is my problem:
I have a list of URLs in a tbl and I need to extract the jpg name.
This is the URL:
https://content_xxx.xxx.com/vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/13643048_612108275661958_805860992_n.jpg?ff_cache_key=fffffQ%3ff%3D.2
and this is the part I need to extract:
13643048_612108275661958_805860992_n
Thanks for your help.
This requires two things:
parse the URL itself
get the filename from the path of the URL
You can do both manually but it's much better to use existing tools. The first part is solved by the parseURI function from the XML package:
uri = 'https://content_xxx.xxx.com/vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/13643048_612108275661958_805860992_n.jpg?ff_cache_key=fffffQ%3ff%3D.2'
parts = XML::parseURI(uri)
And the second part is trivially solved by the basename function:
filename = basename(parts$path)
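If you want the name without the .jpg extension (as in your example), tools::file_path_sans_ext() from the base tools package strips it; a minimal follow-up sketch:
name = tools::file_path_sans_ext(filename)
## [1] "13643048_612108275661958_805860992_n"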
Googling for "R parse URL" could have saved you from typing ~400 keystrokes (tho I expect the URL was pasted).
In any event, you want to process a vector of these things, so there's a better way. In fact there are multiple ways to do this URL path extraction in R. Here are 3:
library(stringi)
library(urltools)
library(httr)
library(XML)
library(dplyr)
We'll generate 100 unique URLs that fit the same Instagram pattern (NOTE: scraping Instagram is a violation of their ToS and controlled by robots.txt. If your URLs did not come from the Instagram API, please let me know so I can delete this answer, as I don't help content thieves).
set.seed(0)
paste(
"https://content_xxx.xxx.com/vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/13643048_612108275661958_805860992_n.jpg?ff_cache_key=fffffQ%3ff%3D.2",
stri_rand_strings(100, 8, "[0-9]"), "_",
stri_rand_strings(100, 15, "[0-9]"), "_",
stri_rand_strings(100, 9, "[0-9]"), "_",
stri_rand_strings(100, 1, "[a-z]"),
".jpg?ff_cache_key=MTMwOTE4NjEyMzc1OTAzOTc2NQ%3D%3D.2",
sep=""
) -> img_urls
head(img_urls)
## [1] "https://content_xxx.xxx.com/vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/13643048_612108275661958_805860992_n.jpg?ff_cache_key=fffffQ%3ff%3D.2"
## [2] "https://https://content_xxx.xxx.com/vp/969b7087cc97408ccee167d473388761/5C55ABEB/t51.2885-15/e35/66021637_359927357880233_471353444_q.jpg?ff_cache_key=MTMwOTE4NjEyMzc1OTAzOTc2NQ%3D%3D.2"
## [3] "https://https://content_xxx.xxx.com/vp/969b7087cc97408ccee167d473388761/5C55ABEB/t51.2885-15/e35/47937926_769874508959124_426288550_z.jpg?ff_cache_key=MTMwOTE4NjEyMzc1OTAzOTc2NQ%3D%3D.2"
## [4] "https://https://content_xxx.xxx.com/vp/vp/969b7087cc97408ccee167d473388761/5C55ABEB/t51.2885-15/e35/12303834_440673970920272_460810703_n.jpg?ff_cache_key=MTMwOTE4NjEyMzc1OTAzOTc2NQ%3D%3D.2"
## [5] "https://https://content_xxx.xxx.com/vp/969b7087cc97408ccee167d473388761/5C55ABEB/t51.2885-15/e35/54186717_202600346704982_713363439_y.jpg?ff_cache_key=MTMwOTE4NjEyMzc1OTAzOTc2NQ%3D%3D.2"
## [6] "https://https://content_xxx.xxx.com/vp/969b7087cc97408ccee167d473388761/5C55ABEB/t51.2885-15/e35/48675570_402479399847865_689787883_e.jpg?ff_cache_key=MTMwOTE4NjEyMzc1OTAzOTc2NQ%3D%3D.2"
Now, let's try to parse those URLs:
invisible(urltools::url_parse(img_urls))
invisible(httr::parse_url(img_urls))
## Error in httr::parse_url(img_urls): length(url) == 1 is not TRUE
DOH! httr can't do it.
invisible(XML::parseURI(img_urls))
## Error in if (is.na(uri)) return(structure(as.character(uri), class = "URI")): the condition has length > 1
DOH! XML can't do it either.
That means we need to use an sapply() crutch for httr and XML to get the path component (you can run basename() on any resultant vector as Konrad showed):
data_frame(
urltools = urltools::url_parse(img_urls)$path,
httr = sapply(img_urls, function(URL) httr::parse_url(URL)$path, USE.NAMES = FALSE),
XML = sapply(img_urls, function(URL) XML::parseURI(URL)$path, USE.NAMES = FALSE)
) -> paths
glimpse(paths)
## Observations: 100
## Variables: 3
## $ urltools <chr> "vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/82359289_380972639303339_908467218_h...
## $ httr     <chr> "vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/82359289_380972639303339_908467218_h...
## $ XML      <chr> "/vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/82359289_380972639303339_908467218_...
Note the non-standard inclusion of the initial / in the path from XML. That's not important for this example, but it's worth noting the difference in general.
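The leading slash doesn't matter once you take the basename, though; a quick sanity check with one of the sample paths:
basename("/vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/13643048_612108275661958_805860992_n.jpg")
basename("vp/969ffffff61/5C55ABEB/t51.2ff5-15/e35/13643048_612108275661958_805860992_n.jpg")
## both return "13643048_612108275661958_805860992_n.jpg"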
We'll process one of them since XML and httr have that woeful limitation:
microbenchmark::microbenchmark(
urltools = urltools::url_parse(img_urls[1])$path,
httr = httr::parse_url(img_urls[1])$path,
XML = XML::parseURI(img_urls[1])$path
)
## Unit: microseconds
## expr min lq mean median uq max neval
## urltools 351.268 397.6040 557.09641 499.2220 618.5945 1309.454 100
## httr 550.298 619.5080 843.26520 717.0705 888.3915 4213.070 100
## XML 11.858 16.9115 27.97848 26.1450 33.9065 109.882 100
XML looks faster, but it's not in practice:
microbenchmark::microbenchmark(
urltools = urltools::url_parse(img_urls)$path,
httr = sapply(img_urls, function(URL) httr::parse_url(URL)$path, USE.NAMES = FALSE),
XML = sapply(img_urls, function(URL) XML::parseURI(URL)$path, USE.NAMES = FALSE)
)
## Unit: microseconds
## expr min lq mean median uq max neval
## urltools 718.887 853.374 1093.404 918.3045 1146.540 2872.076 100
## httr 58513.970 64738.477 80697.548 68908.7635 81549.154 224157.857 100
## XML 1155.370 1245.415 2012.660 1359.8215 1880.372 26184.943 100
If you really want to go the regex route, you can read the RFC for the URL BNF, write a naive regex for hacking bits out of one, or Google for the seminal example with over a dozen regular expressions that handle not-so-well-formed URIs; but parsing is generally a better strategy for diverse URL content. For your case, splitting and regexing might work just fine, but it isn't necessarily going to be much faster than parsing:
microbenchmark::microbenchmark(
urltools = tools::file_path_sans_ext(basename(urltools::url_parse(img_urls)$path)),
httr = tools::file_path_sans_ext(basename(sapply(img_urls, function(URL) httr::parse_url(URL)$path, USE.NAMES = FALSE))),
XML = tools::file_path_sans_ext(basename(sapply(img_urls, function(URL) XML::parseURI(URL)$path, USE.NAMES = FALSE))),
regex = stri_match_first_regex(img_urls, "/([[:digit:]]{8}_[[:digit:]]{15}_[[:digit:]]{9}_[[:alpha:]]{1})\\.jpg\\?")[,2]
)
## Unit: milliseconds
## expr min lq mean median uq max neval
## urltools 1.140421 1.228988 1.502525 1.286650 1.444522 6.970044 100
## httr 56.563403 65.696242 77.492290 69.809393 80.075763 157.657508 100
## XML 1.513174 1.604012 2.039502 1.702018 1.931468 11.306436 100
## regex 1.137204 1.223683 1.337675 1.260339 1.397273 2.241121 100
As noted in that final example, you'll need to run tools::file_path_sans_ext() on the result to remove the .jpg (or sub() it away).
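Since your URLs are in a tbl, here's a hedged sketch of how this might slot into a dplyr pipeline (my_tbl and the url column are placeholders for whatever your table and column are actually called):
library(dplyr)
library(urltools)
my_tbl %>%
  mutate(img_name = tools::file_path_sans_ext(basename(url_parse(url)$path)))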
Related
I have a CSV file of size ~1 GB, and as my laptop is of basic configuration, I'm not able to open the file in Excel or R. But out of curiosity, I would like to get the number of rows in the file. How am I to do it, if at all I can do it?
For Linux/Unix:
wc -l filename
For Windows:
find /c /v "A String that is extremely unlikely to occur" filename
Option 1:
Through a file connection, count.fields() counts the number of fields per line of the file based on some sep value (that we don't care about here). So if we take the length of that result, theoretically we should end up with the number of lines (and rows) in the file.
length(count.fields(filename))
If you have a header row, you can skip it with skip = 1:
length(count.fields(filename, skip = 1))
There are other arguments that you can adjust for your specific needs, like skipping blank lines.
args(count.fields)
# function (file, sep = "", quote = "\"'", skip = 0, blank.lines.skip = TRUE,
# comment.char = "#")
# NULL
See help(count.fields) for more.
It's not too bad as far as speed goes. I tested it on one of my baseball files that contains 99846 rows.
nrow(data.table::fread("Batting.csv"))
# [1] 99846
system.time({ l <- length(count.fields("Batting.csv", skip = 1)) })
# user system elapsed
# 0.528 0.000 0.503
l
# [1] 99846
file.info("Batting.csv")$size
# [1] 6153740
(The more efficient) Option 2: Another idea is to use data.table::fread() to read the first column only, then take the number of rows. This would be very fast.
system.time(nrow(fread("Batting.csv", select = 1L)))
# user system elapsed
# 0.063 0.000 0.063
Estimate the number of lines based on the size of the first 1000 lines:
size1000 <- sum(nchar(readLines(con = "dgrp2.tgeno", n = 1000)))
sizetotal <- file.size("dgrp2.tgeno")
1000 * sizetotal / size1000
This is usually good enough for most purposes - and is a lot faster for huge files.
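A hedged sketch wrapping that idea into a small helper (the file name is just the one from the example above; the + 1L roughly accounts for the newline byte that nchar() does not count, assuming Unix line endings):
estimate_lines <- function(path, n = 1000L) {
  first <- readLines(path, n = n)
  # average bytes per line, including one newline byte per line
  bytes_per_line <- sum(nchar(first, type = "bytes") + 1L) / length(first)
  round(file.size(path) / bytes_per_line)
}
estimate_lines("dgrp2.tgeno")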
Here is something I used:
testcon <- file("xyzfile.csv",open="r")
readsizeof <- 20000
nooflines <- 0
while ((linesread <- length(readLines(testcon, readsizeof))) > 0) {
  nooflines <- nooflines + linesread
}
close(testcon)
nooflines
Check out this post for more:
https://www.r-bloggers.com/easy-way-of-determining-number-of-linesrecords-in-a-given-large-file-using-r/
Implementing Tony's answer in R:
file <- "/path/to/file"
cmd <- paste("wc -l <", file)
as.numeric(system(cmd, intern = TRUE))
This is about 4x faster than data.table for a file with 100k lines:
> microbenchmark::microbenchmark(
+ nrow(fread("~/Desktop/cmx_bool.csv", select = 1L)),
+ as.numeric(system("wc -l <~/Desktop/cmx_bool.csv", intern = TRUE))
+ )
Unit: milliseconds
                                                               expr       min        lq      mean   median        uq      max neval
                nrow(fread("~/Desktop/cmx_bool.csv", select = 1L)) 128.06701 131.12878 150.43999 135.1366 142.99937 629.4880   100
 as.numeric(system("wc -l <~/Desktop/cmx_bool.csv", intern = TRUE))  27.70863  28.42997  34.83877  29.5070  33.32973 270.3104   100
TL;DR: I want to pad each string in a vector to a given size with a given character on the left. I want it fast. See the code and example below.
I have a very large vector of strings, containing... well, anything, but with a known maximum number of characters. I want to pad those strings with zeros on the left to a given size (greater than the maximum number of characters).
Suppose:
c("yop",NA,"1234567","19","12AN","PLOP","5689777")
Given, for example, a target size of 10, I want:
[1] "0000000yop" NA "0001234567" "0000000019" "00000012AN" "000000PLOP" "0005689777"
as a result, as fast as possible.
I've tried to write my own, but it's not really fast... Could you help me make it faster? I have billions of these to process.
Here's my current code:
library(purrr)
zero_left <- function(field, nb) {
  map2_chr(
    map(abs(nb - nchar(field)), ~ rep("0", .x)),
    field,
    ~ paste0(c(.x, .y), collapse = "")
  )
}
trial <- c("yop","1234567","19","12AN","PLOP","5689777")
zero_left(trial,10)
This code doesn't even handle the NA case... Without that it works, but it's too slow.
This relies on an external package but takes 1/30 of the time your zero_left() function takes:
nb <- 10
stringr::str_pad(trial, width=nb, pad="0")
[1] "0000000yop" "0001234567" "0000000019" "00000012AN" "000000PLOP" "0005689777"
Edit 1:
A base-R solution that, it seems, probably isn't quite as fast:
gsub(pattern = " ", replacement = "0", sprintf("%*s", nb, trial), fixed = TRUE)
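One caveat with the gsub() approach: it replaces every space, including any spaces inside the strings themselves, so it is only safe if your values never contain spaces. For example:
gsub(pattern = " ", replacement = "0", sprintf("%*s", 10, "AB CD"), fixed = TRUE)
## [1] "00000AB0CD"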
Edit 2:
Remembering that stringr is just a wrapper around stringi functions, you can get another speed boost by using stringi directly:
stringi::stri_pad_left(trial, width = nb, pad = "0")
If speed is your concern, base R can be faster than stringr/stringi:
library(microbenchmark)
microbenchmark(
stringr=stringr::str_pad(trial, width=nb, pad="0"),
stringi=stringi::stri_pad_left(trial, width = nb, pad = "0"),
base=paste(strrep("0", nb - nchar(trial)), trial, sep="")
)
# Unit: microseconds
# expr min lq mean median uq max neval
# stringr 21.292 22.747 24.87188 23.7070 24.4735 129.470 100
# stringi 10.473 12.359 13.15298 13.0180 13.5445 21.418 100
# base 7.848 9.392 10.83702 10.2035 10.8980 43.620 100
The only consequence is that an NA (as in the original vector from the question) gets turned into a literal "NANA" here:
paste(strrep("0", nb - nchar(trial)), trial, sep="")
# [1] "0000000yop" "NANA" "0001234567" "0000000019" "00000012AN"
# [6] "000000PLOP" "0005689777"
so the workaround is
microbenchmark(
stringr=stringr::str_pad(trial, width=nb, pad="0"),
stringi=stringi::stri_pad_left(trial, width = nb, pad = "0"),
base={v=paste(strrep("0", nb - nchar(trial)), trial, sep="");v[is.na(trial)]=NA;}
)
# Unit: microseconds
# expr min lq mean median uq max neval
# stringr 20.657 22.6440 23.99204 23.3870 24.6190 60.096 100
# stringi 10.980 12.1585 13.57061 13.0790 13.7800 64.135 100
# base 10.766 11.9185 13.69714 13.0665 13.8035 87.226 100
(Which makes base R about as fast as stringi and slightly faster than stringr, in this case.)
(I'm mildly annoyed that paste converts NA to "NA", though that's already been addressed here on SO.)
When using a lookup by name in a list, it is possible to first turn the list into an environment with hashing. For example:
x <- 1:1e5
names(x) <- x
lx <- as.list(x)
elx <- list2env(lx, hash = TRUE) # takes some time
library(microbenchmark)
microbenchmark(x[[which(x==1000)]], x[["1000"]], lx[["1000"]], get("1000", envir = elx), elx[["1000"]])
With the following performance gain:
> microbenchmark(x[[which(x==1000)]], x[["1000"]], lx[["1000"]], get("1000", envir = elx), elx[["1000"]])
Unit: nanoseconds
expr min lq mean median uq max neval cld
x[[which(x == 1000)]] 547213 681609.5 1063382.25 720718.5 788538.5 5999776 100 b
x[["1000"]] 6518 6829.0 7961.83 7139.0 8070.0 22659 100 a
lx[["1000"]] 6518 6829.0 8284.63 7140.0 8070.5 33212 100 a
get("1000", envir = elx) 621 931.0 2477.22 1242.0 2794.0 20175 100 a
elx[["1000"]] 0 1.0 1288.47 311.0 1552.0 22659 100 a
When looking at the help page for list2env, the hash argument is described as:
(for the case envir = NULL): logical indicating if the created
environment should use hashing, see new.env.
When looking at the help for new.env, it doesn't explain how the hash table is created, but it does say:
For the performance implications of hashing or not, see
https://en.wikipedia.org/wiki/Hash_table.
So it's obvious that hashing is done, and works well (at least for the example I gave), but as the Wikipedia page shows, there are various ways of creating hash tables. Hence, my question is: how is the hash table created in list2env?
I have large objects in R that barely fit in my 16 GB of memory (a data.table database of >4M records, >400 variables).
I'd like to have a hash function that will be used to confirm that the database loaded into R has not been modified.
One fast way to do that is to calculate the database's hash and compare it with the previously stored hash.
The problem is that the digest::digest function copies (serializes) the data, and only after all the data are serialized does it calculate the hash. Which is too late on my hardware... :-(
Does anyone know about a way around this problem?
There is a poor man's solution: save the object to a file and calculate the hash of the file. But that introduces a large, unnecessary overhead (I have to make sure there is spare space on the HDD for yet another copy, and I need to keep track of all the files that may not be automatically deleted).
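For reference, a minimal sketch of that poor man's workaround (mydata is just a placeholder for your data.table), which makes the overhead explicit:
tmp <- tempfile(fileext = ".rds")
saveRDS(mydata, tmp)                      # writes a full extra copy to disk
hash <- digest::digest(tmp, file = TRUE)  # hashes the file, not the in-memory object
unlink(tmp)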
A similar problem has been described in our issue tracker here:
https://github.com/eddelbuettel/digest/issues/33
The current version of digest can read a file to compute the hash.
Therefore, at least on Linux, we can use a named pipe that the digest package reads from in one process while the data are written to it by another process.
The following code snippet shows how we can compute an MD5 hash of 10 numbers by feeding the digester first with 1:5 and then with 6:10.
library(parallel)
library(digest)
x <- as.character(1:10) # input
fname <- "mystream.fifo" # choose name for your named pipe
close(fifo(fname, "w")) # creates your pipe if it does not exist
producer <- mcparallel({
mystream <- file(fname, "w")
writeLines(x[1:5], mystream)
writeLines(x[6:10], mystream)
close(mystream) # sends signal to the consumer (digester)
})
digester <- mcparallel({
digest(fname, file = TRUE, algo = "md5") # just reads the stream till signalled
})
# collect the results from both processes (they run in parallel)
mccollect(list(producer, digester))
unlink(fname) # named pipe removed
UPDATE: Henrik Bengtsson provided a modified example based on futures:
library("future")
plan(multiprocess)
x <- as.character(1:10) # input
fname <- "mystream.fifo" # choose name for your named pipe
close(fifo(fname, open="wb")) # creates your pipe if it does not exist
producer %<-% {
mystream <- file(fname, open="wb")
writeBin(x[1:5], endian="little", con=mystream)
writeBin(x[6:10], endian="little", con=mystream)
close(mystream) # sends signal to the consumer (digester)
}
# just reads the stream till signalled
md5 <- digest::digest(fname, file = TRUE, algo = "md5")
print(md5)
## [1] "25867862802a623c16928216e2501a39"
# Note: Identical on Linux and Windows
Following up on nicola's comment, here's a benchmark of the column-wise idea. It seems it doesn't help much, at least not at this size. iris is 150 rows; long_iris is 3M (3,000,000).
library(microbenchmark)
#iris
nrow(iris)
microbenchmark(
whole = digest::digest(iris),
cols = digest::digest(lapply(iris, digest::digest))
)
#long iris
long_iris = do.call(dplyr::bind_rows, replicate(20e3, iris, simplify = F))
nrow(long_iris)
microbenchmark(
whole = digest::digest(long_iris),
cols = digest::digest(lapply(long_iris, digest::digest))
)
Results:
#normal
Unit: milliseconds
expr min lq mean median uq max neval cld
whole 12.6 13.6 14.4 14.0 14.6 24.9 100 b
cols 12.5 12.8 13.3 13.1 13.5 23.0 100 a
#long
Unit: milliseconds
expr min lq mean median uq max neval cld
whole 296 306 317 311 316 470 100 b
cols 261 276 290 282 291 429 100 a