How to work with all subsets in a vectorized way in R

I have a stock price dataframe containing a lot of symbols, and I would like to perform operations on the subsets for every symbol in a vectorized way. My data is:
head(dataset)
date open high low close volume symbol
1 2014-08-29 34.59 34.6800 34.59 34.6800 200 AAIT
2 2014-08-28 34.96 34.9600 34.96 34.9600 211 AAIT
3 2014-08-27 35.28 35.2800 35.28 35.2800 507 AAIT
4 2014-08-26 35.02 35.0200 35.02 35.0200 00 AAIT
5 2014-08-25 34.57 35.0200 34.57 35.0200 385 AAIT
6 2014-08-22 34.80 34.8299 34.80 34.8299 802 AAIT
For every symbol I would like to do something like this:
for (symb in unique(dataset$symbol)) {
  dataset$night = with(subset(dataset, dataset$symbol == symb),
                       open[-length(open)] - close[-1])
}
This causes the last row of each subset to be filled with NA, so I can't do that on the whole dataframe. I could replace the last line afterwards, but I would prefer to work with the subsets for convenience. Is it possible to do the for loop in a vectorized way? (for loops are very slow in R, which can become a problem if I have too many symbols)

You could use dplyr:
library(dplyr)
dataset <- dataset %>%
  group_by(symbol) %>%
  mutate(night = c(head(open, -1) - tail(close, -1), NA))
or plyr:
library(plyr)
dataset <- ddply(dataset, .(symbol), mutate,
                 night = c(head(open, -1) - tail(close, -1), NA))
or data.table:
library(data.table)
dt <- data.table(dataset)
setkey(dt, symbol)
dt[, night := c(head(open, -1) - tail(close, -1), NA), by = symbol]
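A side note for newer data.table versions (>= 1.9.6): shift() expresses the same lead/lag logic more directly than the head()/tail() juggling. A minimal sketch, continuing with the dt created above:
# open today minus the next row's close within each symbol;
# shift(close, type = "lead") pads the last row of each group with NA
dt[, night := open - shift(close, type = "lead"), by = symbol]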

Related

How can I iterate a function over specific columns of a series of dataframes where I can set the order?

I work for an insurance company and I am trying to improve something that I built. I have about 150 data frames that look like this:
library(data.table)
dt_Premium <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                         Base_Premium_Fire = c(45,55,105,92),
                         Base_Premium_Water = c(20,21,24,29),
                         Base_Premium_Theft = c(3,5,6,7))
dt_Discount_Factors <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                                  Discount_Factor_Fire = c(.9,.95,.99,.97),
                                  Discount_Factor_Water = c(.8,.85,.9,.96),
                                  Discount_Factor_Theft = c(1,1,1,1))
dt_Territory_Factors <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                                   Territory_Factor_Fire = c(1.9,1.2,.91,1.03),
                                   Territory_Factor_Water = c(1.03,1.3,1.25,1.01),
                                   Territory_Factor_Theft = c(1,1.5,1,.5))
dt_Fixed_Expense <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                               Fixed_Expense_Fire = c(5,5,5,5),
                               Fixed_Expense_Water = c(7,7,7,7),
                               Fixed_Expense_Theft = c(9,9,9,9))
I take the base premium and then I multiply by factors, and then add a fixed expense at the very end. My code is currently something like:
dt_Final_Premium <- cbind(dt_Premium[,1],
                          dt_Premium[,2:4] *
                            dt_Discount_Factors[,2:4] *
                            dt_Territory_Factors[,2:4] +
                            dt_Fixed_Expense[,2:4])
What I hate about this:
-The 2:4 stuff (I would like to be able to use a named range)
-The typing is monstrous considering all of the tables and policies I actually have
-It is very confusing for anybody except me (the author) to understand and edit/adjust the code
-I would like to be able to have each rating step as part of a list, and then just iterate over that list (or a similar process).
-Ideally I would be able to get the values at each step. For example:
step2_answer <- cbind(dt_Premium[,1],
                      dt_Premium[,2:4] * dt_Discount_Factors[,2:4])
There just has to be a way where I can take a dataframe/datatable and then just multiply or add the next dataframe/datatable in the series. Thanks for taking a look at this!
How about something like this using dplyr?!
Here I am using the same calculation that you mentioned, but row-wise with dplyr's mutate() function, which lays out the steps clearly and makes the calculation easy for anyone to follow.
library(data.table)
library(dplyr)
dt_Premium <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                         Base_Premium_Fire = c(45,55,105,92),
                         Base_Premium_Water = c(20,21,24,29),
                         Base_Premium_Theft = c(3,5,6,7))
dt_Discount_Factors <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                                  Discount_Factor_Fire = c(.9,.95,.99,.97),
                                  Discount_Factor_Water = c(.8,.85,.9,.96),
                                  Discount_Factor_Theft = c(1,1,1,1))
dt_Territory_Factors <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                                   Territory_Factor_Fire = c(1.9,1.2,.91,1.03),
                                   Territory_Factor_Water = c(1.03,1.3,1.25,1.01),
                                   Territory_Factor_Theft = c(1,1.5,1,.5))
dt_Fixed_Expense <- data.table(Policy = c("Pol123","Pol333","Pol555","Pol999"),
                               Fixed_Expense_Fire = c(5,5,5,5),
                               Fixed_Expense_Water = c(7,7,7,7),
                               Fixed_Expense_Theft = c(9,9,9,9))
dt_Final_Premium <- cbind(dt_Premium[,1],
                          dt_Premium[,2:4] *
                            dt_Discount_Factors[,2:4] *
                            dt_Territory_Factors[,2:4] +
                            dt_Fixed_Expense[,2:4])
new_dt_final_premium <-
  dt_Premium %>%
  # Joining all tables together
  left_join(dt_Discount_Factors, by = "Policy") %>%
  left_join(dt_Territory_Factors, by = "Policy") %>%
  left_join(dt_Fixed_Expense, by = "Policy") %>%
  # Calculating the required values
  mutate(
    Base_Premium_Fire =
      Base_Premium_Fire * Discount_Factor_Fire * Territory_Factor_Fire + Fixed_Expense_Fire,
    Base_Premium_Water =
      Base_Premium_Water * Discount_Factor_Water * Territory_Factor_Water + Fixed_Expense_Water,
    Base_Premium_Theft =
      Base_Premium_Theft * Discount_Factor_Theft * Territory_Factor_Theft + Fixed_Expense_Theft) %>%
  select(Policy, Base_Premium_Fire, Base_Premium_Water, Base_Premium_Theft)
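As a further side note, if the peril columns always share the Fire/Water/Theft suffixes, the three mutate() lines can be collapsed with across() and cur_column() from dplyr >= 1.0. A minimal sketch under that assumption:
dt_Premium %>%
  left_join(dt_Discount_Factors, by = "Policy") %>%
  left_join(dt_Territory_Factors, by = "Policy") %>%
  left_join(dt_Fixed_Expense, by = "Policy") %>%
  mutate(across(starts_with("Base_Premium"),
                # for each Base_Premium_X column, find its matching factor and
                # expense columns by swapping the prefix in the column name
                ~ .x *
                  get(sub("Base_Premium", "Discount_Factor", cur_column())) *
                  get(sub("Base_Premium", "Territory_Factor", cur_column())) +
                  get(sub("Base_Premium", "Fixed_Expense", cur_column())))) %>%
  select(Policy, starts_with("Base_Premium"))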
Since your columns follow a clean naming scheme, some pivoting may do the job:
library(tidyverse) # to be run after library(data.table)
dt_Premium %>%
  left_join(dt_Discount_Factors, by = "Policy") %>%
  left_join(dt_Territory_Factors, by = "Policy") %>%
  left_join(dt_Fixed_Expense, by = "Policy") %>%
  pivot_longer(cols = -Policy) %>%
  separate(name, into = c("name", "object"), sep = "_.*_") %>%
  pivot_wider() %>%
  mutate(total = Base * Discount * Territory + Fixed) %>% # or calculate the value for a specific step
  select(Policy, object, total) %>%
  pivot_wider(names_from = "object", values_from = "total")
After joining all the columns, you can pivot to a long format to turn columns into rows. There, you can separate the name into the real name (Base, Discount, Fixed...) and the object (Fire, Water, ...) and return to the wide format. The tricky part is getting a good regular expression, as your names use the underscore twice. Mine can be vastly improved but will do the job for now.
After this, you can calculate whatever you want, select only the result, and pivot to wide one last time. If you want to keep all the intermediate results, you may tweak this last pivot with prefixes.
Pivoting takes some gymnastics, but it has proven very effective once you get used to it.
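For what it's worth, one way to tighten that regular expression is tidyr::extract(), whose capture groups make the intent explicit (first underscore-delimited token = step name, last token = peril). It can slot in as a drop-in replacement for the separate() line above:
# "Base_Premium_Fire" -> name = "Base", object = "Fire"
extract(name, into = c("name", "object"), regex = "^([^_]+)_.*_([^_]+)$") %>%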
As you have a lot of tables, if you can get them as a list, you can also use purrr::reduce to join them all at once and simplify the first lines of code:
list(dt_Premium, dt_Discount_Factors, dt_Territory_Factors, dt_Fixed_Expense) %>%
  reduce(left_join, by = "Policy") %>%
  pivot_longer(cols = -Policy) %>%
  separate(name, into = c("name", "object"), sep = "_.*_") %>%
  pivot_wider() %>%
  mutate(total = Base * Discount * Territory + Fixed) %>% # or calculate the value for a specific step
  select(Policy, object, total) %>%
  pivot_wider(names_from = "object", values_from = "total")
Another option is to reorganize the data by converting it into a long format, merging, and then performing the calculations:
DT <- Reduce(merge, lapply(dtList, function(d) {
  vn <- sub('_([^_]*)$', '', names(d)[2L]) # see reference [1]
  melt(d, id.vars = "Policy", value.name = vn)[,
    variable := gsub("(.*)_(.*)_(.*)", "\\3", variable)]
}))
DT
DT[, disc_prem := Base_Premium * Discount_Factor][,
   disc_prem_loc := disc_prem * Territory_Factor][,
   Final_Premium := disc_prem_loc + Fixed_Expense]
output:
Policy variable Base_Premium Discount_Factor Territory_Factor Fixed_Expense disc_prem disc_prem_loc Final_Premium
1: Pol123 Fire 45 0.90 1.90 5 40.50 76.9500 81.9500
2: Pol123 Theft 3 1.00 1.00 9 3.00 3.0000 12.0000
3: Pol123 Water 20 0.80 1.03 7 16.00 16.4800 23.4800
4: Pol333 Fire 55 0.95 1.20 5 52.25 62.7000 67.7000
5: Pol333 Theft 5 1.00 1.50 9 5.00 7.5000 16.5000
6: Pol333 Water 21 0.85 1.30 7 17.85 23.2050 30.2050
7: Pol555 Fire 105 0.99 0.91 5 103.95 94.5945 99.5945
8: Pol555 Theft 6 1.00 1.00 9 6.00 6.0000 15.0000
9: Pol555 Water 24 0.90 1.25 7 21.60 27.0000 34.0000
10: Pol999 Fire 92 0.97 1.03 5 89.24 91.9172 96.9172
11: Pol999 Theft 7 1.00 0.50 9 7.00 3.5000 12.5000
12: Pol999 Water 29 0.96 1.01 7 27.84 28.1184 35.1184
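If you want the final result back in the original wide layout (one row per policy, one column per peril), a dcast should do it; a minimal sketch:
# reshape Final_Premium back to wide, one column per peril
dcast(DT, Policy ~ variable, value.var = "Final_Premium")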
data:
dtList <- list(dt_Premium, dt_Discount_Factors, dt_Territory_Factors, dt_Fixed_Expense)
Reference:
[1] regex-return-all-before-the-second-occurrence
I am guessing that reading some of the data.table vignettes would help you tighten up the syntax and make it more terse. Some of us think terse = 'more readable' in numeric programming. Others think that represents some level of insanity:
vignette(package="data.table")
Understanding Map, Reduce, mget and other functional notation in R and data.table may help. Here are some things I have done from a data.table mindset:
The column-dropping syntax might be more terse using 'i' to drop a vector of cols:
dt[is.na(dt)] <- 0 # replace NA with 0
drop_col_list <- c('dropcol1', 'dropcol2', 'dropcol3') # drop col list
# dt <- dt[!drop_col_list, sapply(dt, as.numeric)] # make selected dt cols numeric type
dt[!drop_col_list, SumCol := Reduce(`+`, dt)] # adds Sum col with 'functional programming' iteration
The lapply(.SD, func) format is very powerful:
fsum <- function(x) { sum(x, na.rm = TRUE) }
dt[, lapply(.SD, fsum), .SDcols = c("col1", "col2", "col3", "col4")]
# or
dt[!drop_col_list, lapply(.SD, fsum)]
This shows applying data.table's update-by-reference operator (':=') and mget to create cols derived from operations with functional programming on two data.tables. The data.table(s) may need to have the same nrow():
nm1 <- names(dt1)[1:4]
nm2 <- names(dt2)[1:4]
dt[, SumCol := Reduce(`+`, Map(`*`, mget(nm1), mget(nm2)))]
The loop below isn't really data.table-esque programming, but it outputs a data.table. It probably isn't as fast as more data.table-like syntax:
seqXpi <- function(x) { x * pi }
seqXexp <- function(x) { x * exp(1) }
l <- NULL
for (x in seq(1, 10, 1)) l <- as.data.table(rbind(l, cbind(seq = x, seqXpi = seqXpi(x), seqXexp = seqXexp(x))))
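For comparison, here is a minimal sketch of the loop-free version that last sentence alludes to, building the table in one shot instead of rbind-ing row by row:
dt_seq <- data.table(seq = seq(1, 10, 1))
dt_seq[, `:=`(seqXpi = seqXpi(seq), seqXexp = seqXexp(seq))]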

return output as columns instead of list after applying a function using dplyr [duplicate]

I have a dataset with several columns, one of which is a column for reaction times. These reaction times are comma separated to denote the reaction times (of the same participant) for the different trials.
For example: row 1 (i.e.: the data from participant 1) has the following under the column "reaction times"
reaction_times
2000,1450,1800,2200
Hence these are the reaction times of participant 1 for trials 1,2,3,4.
I now want to create a new data set in which the reaction times for these trials all form individual columns. This way I can calculate the mean reaction time for each trial.
trial 1 trial 2 trial 3 trial 4
participant 1: 2000 1450 1800 2200
I tried the colsplit from the reshape2 package but that doesn't seem to split my data into new columns (perhaps because my data is all in 1 cell).
Any suggestions?
I think you are looking for the strsplit() function:
a = "2000,1450,1800,2200"
strsplit(a, ",")
[[1]]
[1] "2000" "1450" "1800" "2200"
Notice that strsplit returns a list, in this case with only one element. This is because strsplit takes vectors as input. Therefore, you can also put a long vector of your single-cell strings into the function and get back a split list for that vector. In a more realistic example this looks like:
# Create some example data
dat = data.frame(reaction_time =
                   apply(matrix(round(runif(100, 1, 2000)),
                                25, 4), 1, paste, collapse = ","),
                 stringsAsFactors = FALSE)
splitdat = do.call("rbind", strsplit(dat$reaction_time, ","))
splitdat = data.frame(apply(splitdat, 2, as.numeric))
names(splitdat) = paste("trial", 1:4, sep = "")
head(splitdat)
trial1 trial2 trial3 trial4
1 597 1071 1430 997
2 614 322 1242 1140
3 1522 1679 51 1120
4 225 1988 1938 1068
5 621 623 1174 55
6 1918 1828 136 1816
and finally, to calculate the mean per person:
apply(splitdat, 1, mean)
[1] 1187.50 361.25 963.75 1017.00 916.25 1409.50 730.00 1310.75 1133.75
[10] 851.25 914.75 881.25 889.00 1014.75 676.75 850.50 805.00 1460.00
[19] 901.00 1443.50 507.25 691.50 1090.00 833.25 669.25
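(A small aside: the question asks for the mean per trial, which is the column-wise counterpart of the call above:)
colMeans(splitdat) # mean reaction time for each trial, across participants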
A nifty, if rather heavy-handed, way is to use read.csv in conjunction with textConnection. Assuming your data is in a data frame, df:
x <- read.csv(textConnection(df[["reaction times"]]), header = FALSE) # header = FALSE so the first row is read as data, not column names
Old question, but I came across it from another recent question (which seems unrelated).
Both existing answers are appropriate, but I wanted to share an answer related to a package I have created called "splitstackshape" that is fast and has straightforward syntax.
Here's some sample data:
set.seed(1)
dat = data.frame(
  reaction_time = apply(matrix(round(
    runif(24, 1, 2000)), 6, 4), 1, paste, collapse = ","))
This is the splitting:
library(splitstackshape)
cSplit(dat, "reaction_time", ",")
# reaction_time_1 reaction_time_2 reaction_time_3 reaction_time_4
# 1: 532 1889 1374 761
# 2: 745 1322 769 1555
# 3: 1146 1259 1540 1869
# 4: 1817 125 996 425
# 5: 404 413 1436 1304
# 6: 1797 354 1984 252
And, optionally, if you need to take the rowMeans:
rowMeans(cSplit(dat, "reaction_time", ","))
# [1] 1139.00 1097.75 1453.50 840.75 889.25 1096.75
Another option using dplyr and tidyr with Paul Hiemstra's example data is:
# create example data
data = data.frame(reaction_time =
                    apply(matrix(round(runif(100, 1, 2000)),
                                 25, 4), 1, paste, collapse = ","),
                  stringsAsFactors = FALSE)
head(data)
# clean data (str_split() comes from stringr; unnest() and spread() from tidyr)
library(dplyr)
library(tidyr)
library(stringr)
data2 <- data %>%
  mutate(split_reaction_time = str_split(as.character(reaction_time), ",")) %>%
  unnest(split_reaction_time)
data2$col_names <- c("trial1", "trial2", "trial3", "trial4")
data2 <- data2 %>%
  spread(key = col_names, value = split_reaction_time) %>%
  select(-reaction_time)
head(data2)

Creating multiple functions with dplyr non-standard evaluation

I am trying to use mutate_() to create multiple columns, where each is based on a custom function called with different inputs. I can use paste() to create multiple quoted function calls, but this doesn't work because dplyr's NSE requires formulas (~) rather than quoted strings to be able to find the function. How can I write the "dots =" line below so that the function can be found? I tried experimenting with ~, as.formula(), and lazyeval::interp(), but couldn't get any of them to work. My actual "prefixes" vector is long, so I don't want to write out the function calls separately for each new column. Thanks!
library(dplyr)
library(lazyeval)
library(nycflights13)
myfunc = function(x, y) { x - y }
# this works
flights1 <- mutate(flights, dep_time_sched = myfunc(dep_time, dep_delay),
                            arr_time_sched = myfunc(arr_time, arr_delay))
# this doesn't - Error: could not find function "myfunc"
prefixes <- c('dep', 'arr')
dots = as.list(paste0('myfunc(',
                      paste0(prefixes, '_time'), ', ',
                      paste0(prefixes, '_delay)')))
flights2 <- mutate_(flights, .dots = setNames(dots, paste0(prefixes, '_time_sched')))
You could approach this by using interp with lapply to loop through your prefixes and get a list in the desired format for mutate_.
dots = lapply(prefixes, function(var) interp(~myfunc(x, y),
              .values = list(x = as.name(paste0(var, "_time")),
                             y = as.name(paste0(var, "_delay")))))
dots
[[1]]
~myfunc(dep_time, dep_delay)
<environment: 0x0000000019e51f00>
[[2]]
~myfunc(arr_time, arr_delay)
<environment: 0x0000000019f1e5b0>
This gives the same results as your flights1.
flights2 = mutate_(flights, .dots = setNames(dots, paste0(prefixes, '_time_sched')))
identical(flights1, flights2)
[1] TRUE
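(A note for later readers: mutate_() has since been retired, so on current dplyr the same result can be had with rlang's tidy evaluation. A minimal sketch, assuming the same prefixes vector and myfunc() as above:)
library(dplyr)
library(rlang)
# build one named expression per prefix, then splice them all into mutate()
new_cols <- lapply(prefixes, function(p)
  expr(myfunc(!!sym(paste0(p, "_time")), !!sym(paste0(p, "_delay")))))
names(new_cols) <- paste0(prefixes, "_time_sched")
flights3 <- mutate(flights, !!!new_cols)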
My actual "prefixes" is a long vector so I don't want to separately write out the function calls for each new column.
If that's the case, you should really transform your data to long format. To clarify what I mean, let's look at a smaller example:
mydat <- flights[1:5, c(paste0(prefixes,"_time"), paste0(prefixes,"_delay"))]
# dep_time arr_time dep_delay arr_delay
# (int) (int) (dbl) (dbl)
# 1 517 830 2 11
# 2 533 850 4 20
# 3 542 923 2 33
# 4 544 1004 -1 -18
# 5 554 812 -6 -25
library(data.table)
longdat <- setDT(mydat)[, .(
  pref  = rep(prefixes, each = .N),
  time  = unlist(mget(paste0(prefixes, "_time"))),
  delay = unlist(mget(paste0(prefixes, "_delay")))
)]
longdat[, time_sched := myfunc(time, delay) ]
# pref time delay time_sched
# 1: dep 517 2 515
# 2: dep 533 4 529
# 3: dep 542 2 540
# 4: dep 544 -1 545
# 5: dep 554 -6 560
# 6: arr 830 11 819
# 7: arr 850 20 830
# 8: arr 923 33 890
# 9: arr 1004 -18 1022
# 10: arr 812 -25 837
Besides being simpler, calling the function a single time takes advantage of its vectorization.
While I used data.table to construct longdat, I'm sure there's a tool to do the same thing in the tidyr package (companion to dplyr). Similarly, the addition of the time_sched column is just a mutate.
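For the record, here is a minimal sketch of that same reshape with tidyr's pivot_longer() (which postdates this answer); the ".value" sentinel splits each column name on the underscore into the prefix and the measure:
library(dplyr)
library(tidyr)
longdat2 <- mydat %>%
  mutate(row = row_number()) %>%
  pivot_longer(-row,
               names_to = c("pref", ".value"),
               names_sep = "_") %>%
  mutate(time_sched = myfunc(time, delay))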
Alternative ways of reshaping: thanks to @akrun, here is another way to get to longdat, using melt function syntax that will be available in the next version of data.table (1.9.8, not released at the time of writing):
longdat <- melt(mydat,
                measure = patterns('time$', 'delay$'),
                variable.name = "pref",
                value.name = c('time', 'delay')
)[, pref := prefixes[pref]]
or, also thanks to @akrun, here is a way to reshape that automatically constructs the prefixes, given the suffixes (time and delay), using @AnandaMahto's splitstackshape package:
library(splitstackshape)
longdat <- merged.stack(transform(mydat, ind = 1:nrow(mydat)),
                        var.stubs = c('_time', '_delay'),
                        sep = 'var.stubs',
                        atStart = FALSE)

Equivalent of SAS format (in R)

Suppose I have a dataframe:
sick <- c("daa12", "daa13", "daa14", "daa15", "daa16", "daa17")
code <- c("heart", "heart", "lung", "lung", "cancer", "cancer")
sick_code <- data.frame(sick, code)
And another:
pid <- abs(round(rnorm(6)*1000,0))
sick <- c("-" , "-", "-", "-", "daa16", "SO")
p_sick <- data.frame(pid, sick)
Now I would like to add a new variable to p_sick that "translates" p_sick$sick to sick_code$code. The values in p_sick$sick are strings which may or may not appear in sick_code$sick; when there is no match, NA should be returned.
Now I could write a for loop with a simple ifelse statement, but the data I have is 150 million rows long, and the translation table is 15,000 rows long.
I have googled that this is the equivalent of a "proc format" in SAS (but I do not have access to SAS, nor do I have any idea how it works).
Perhaps some variant of merge in plyr, or an apply function?
EDIT: I have accepted both answers, since they work.
I will try to look into the difference in speed between the two. Since merge is a built-in function, I am guessing it does lots of checking.
EDIT2: To people getting here via Google: merge has an argument sort = FALSE, which will speed things up. Note that the order is not preserved in any way.
data.table will be suitable in your example:
library(data.table)
setkey(setDT(p_sick), sick)
p_sick[setDT(sick_code), code := i.code][]
pid sick code
1: 3137 - NA
2: 755 - NA
3: 1327 - NA
4: 929 - NA
5: 939 daa16 cancer
6: 906 SO NA
Please see here for a detailed explanation.
You could use merge with all.x = TRUE (to keep values from p_sick with no match in sick_code):
merge(p_sick, sick_code, all.x = TRUE)
An equivalent is using left_join from dplyr:
library(dplyr)
left_join(p_sick, sick_code)
# pid sick code
# 1 212 - <NA>
# 2 2366 - <NA>
# 3 325 - <NA>
# 4 269 - <NA>
# 5 501 daa16 cancer
# 6 1352 SO <NA>
Note that each of these solutions works only because the name sick is shared between the two data frames. Suppose they had different names, say the column was called sickness in sick_code. You could accommodate this with, respectively:
merge(p_sick, sick_code, by.x = "sick", by.y = "sickness", all.x = TRUE)
# or
left_join(p_sick, sick_code, c(sick = "sickness"))
A simple named vector will also work. The named vector can act as a lookup table: instead of defining sick and code as a data frame, define the codes as a named vector and use it as a decode, like this:
# Set up named vector
sick_decode <- c("heart", "heart", "lung", "lung", "cancer", "cancer")
names(sick_decode) <- c("daa12", "daa13", "daa14", "daa15", "daa16", "daa17")
# Prepare data
pid <- abs(round(rnorm(6)*1000,0))
sick <- c("-" , "-", "-", "-", "daa16", "SO")
p_sick <- data.frame(pid, sick)
# Create new variable using decode
p_sick$sick_decode <- sick_decode[p_sick$sick]
# Results
#> pid sick sick_decode
#> 1 511 - <NA>
#> 2 1619 - <NA>
#> 3 394 - <NA>
#> 4 641 - <NA>
#> 5 53 daa16 cancer
#> 6 244 SO <NA>
I suspect this method will also be fast, but have not benchmarked it.
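One quick way to check that suspicion, a sketch using the microbenchmark package (just one choice of timing tool) to compare the named-vector lookup against merge():
library(microbenchmark)
microbenchmark(
  lookup = unname(sick_decode[as.character(p_sick$sick)]), # as.character guards against factor columns
  merge  = merge(p_sick, sick_code, all.x = TRUE, sort = FALSE),
  times  = 100
)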
Also, there is now an R package specifically for replicating SAS format functionality in R. It is called fmtr.
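A minimal sketch of what that might look like, based on fmtr's value()/condition()/fapply() interface (treat the exact usage as an assumption and check the package documentation):
library(fmtr)
# a user-defined format, roughly analogous to PROC FORMAT value statements
sick_fmt <- value(condition(x %in% c("daa12", "daa13"), "heart"),
                  condition(x %in% c("daa14", "daa15"), "lung"),
                  condition(x %in% c("daa16", "daa17"), "cancer"),
                  condition(TRUE, NA))
p_sick$code <- fapply(p_sick$sick, sick_fmt)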
