DATA = data.table(STUDENT = c(1,1,2,2,2,2,2,3,3,3,3,3,4),
SCORE = c(5,6,8,3,14,5,6,9,0,12,13,14,19))
WANT = data.table(STUDENT = c(1,1,4),
SCORE = c(5,6,19))
I have DATA and wish to create WANT, which takes a random sample of 2 STUDENT values and keeps all of their rows. WANT above is one example of such a sample.
I tried this with no success:
WANT = DATA[ , .SD[sample(x = .N, size = 2)], by = STUDENT]
That attempt samples 2 rows within each STUDENT group. Instead, sample the unique values of STUDENT and then filter all the rows for those students:
library(data.table)
set.seed(1357)
DATA[STUDENT %in% sample(unique(STUDENT), 2)]
# STUDENT SCORE
#1: 1 5
#2: 1 6
#3: 4 19
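An equivalent way to express the same idea, if you prefer a keyed join over %in%; this is only a sketch of an alternative form, not part of the original answer:
ids <- sample(unique(DATA$STUDENT), 2)   # draw 2 students at random
DATA[.(ids), on = "STUDENT"]             # keep every row for those students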
My data frame looks like this:
person. id98 id100 id102 educ98 educ100 educ102 pid98 pid100 pid102
1       3    0     0     2      4       5       T     F      F
2       ...
I hope to transform it like this:
person. year id educ pid
1       98
1       100
1       102
In Stata, I know that the "reshape" command can automatically identify the year from those variables' names. In R, I don't know how to deal with that.
I want to take the trailing number in each column name and bundle the columns based on that number.
If you would like to use reshape, maybe the code below could help
reshape(
  setNames(df, gsub("(\\d+)", "\\.\\1", names(df))),
  # the gsub is needed because `reshape` expects a period as a separator
  direction = "long",
  varying = -1
)
which gives
person. time id educ pid
1.98 1 98 1 2 TRUE
1.100 1 100 1 4 FALSE
1.102 1 102 1 5 FALSE
Data
> dput(df)
structure(list(person. = 1, id98 = 3, id100 = 0, id102 = 0, educ98 = 2,
educ100 = 4, educ102 = 5, pid98 = TRUE, pid100 = FALSE, pid102 = FALSE), class = "data.frame", row.names = c(NA,
-1L))
You can use pivot_longer from tidyr, using the data from @ThomasIsCoding's answer:
tidyr::pivot_longer(df,
cols = -person.,
names_to = c('.value', 'year'),
names_pattern = '([a-z]+)(\\d+)')
# person. year id educ pid
# <dbl> <chr> <dbl> <dbl> <lgl>
#1 1 98 3 2 TRUE
#2 1 100 0 4 FALSE
#3 1 102 0 5 FALSE
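If you would rather have year as an integer instead of a character column, pivot_longer can convert it during the reshape via its names_transform argument; a small optional tweak, assuming tidyr >= 1.1.0:
tidyr::pivot_longer(df,
                    cols = -person.,
                    names_to = c('.value', 'year'),
                    names_pattern = '([a-z]+)(\\d+)',
                    names_transform = list(year = as.integer))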
Using the data.table package this is fairly easy.
As a note, I originally thought this would only work if the columns appeared in a consistent order, i.e. id90 id100 id102 pid90 pid100 pid102 ... etc.
Edit
The code below now sorts the columns first, which solves the issue mentioned above.
# loading data.table
if(!require(data.table)){
install.packages("data.table")
library(data.table)
}
df= data.frame(person=1:5, id90=rnorm(5), id91=rnorm(5), id92=rnorm(5), pid90=rnorm(5), pid91=rnorm(5), pid92=rnorm(5), educ90=rnorm(5), educ91=rnorm(5), educ92=rnorm(5))
# turn data.frame in data.table
setDT(df)
cols = colnames(df)[order(colnames(df))]
# df[, ..cols] reorders the columns alphabetically
# to evade the problem stated above.
# id.vars is the id vars
# using patterns with measure vars will bundle all the columns
# that match the regex pattern in the same column
dt <- melt(df[, ..cols], id.vars="person", measure.vars=patterns(id="^id", educ="^educ", pid="^pid"))
# getting the years
years = gsub('^id', '', colnames(df)[grepl('^id', colnames(df))])
# changing the years
dt[, c("year","variable"):=list(years[variable], NULL)]
I am looking to create a dataframe that lists a unique ID with the movement of n different amounts across a period of m timesteps. I currently generate subsets of each timestep and then merge all these subsets with a separate dataframe that contains just the unique IDs. See below:
set.seed(129)
df1 <- data.frame(
id= c(rep(seq(1:7),3)),
time= c(1,1,1,1,1,1,1,2,2,2,2,2,2,2,3,3,3,3,3,3,3),
amount1= runif(21,0,50),
amount2= runif(21,-20,600),
amount3= runif(21,-15,200),
amount4= runif(21,-3,300)
)
df2 <- data.frame(
id = unique(df1$id)
)
sub_1 <- subset(df1, time == 1)
sub_2 <- subset(df1, time == 2)
sub_3 <- subset(df1, time == 3)
df2<-merge(df2,sub_1,by.x = "id",by.y = "id", all=TRUE)
df2<-merge(df2,sub_2,by.x = "id",by.y = "id", all=TRUE)
df2<-merge(df2,sub_3,by.x = "id",by.y = "id", all=TRUE)
#df2
id time.x amount1.x amount2.x amount3.x amount4.x time.y amount1.y amount2.y amount3.y amount4.y time amount1 amount2 amount3 amount4
1 1 1 6.558261 -17.713007 46.477430 195.061597 2 18.5453843 269.7406808 132.588713 80.40133 3 24.943217 488.1025 103.473479 198.51302
2 2 1 15.736044 230.018563 72.604346 -2.513162 2 48.8537058 356.5593748 161.239261 246.25985 3 35.559262 406.4749 66.278064 30.11592
3 3 1 8.057720 386.814867 101.997370 152.269564 2 0.7334493 0.7842648 66.603965 156.12478 3 42.170220 450.0306 195.872986 109.73098
4 4 1 15.575282 527.033563 37.403278 197.529341 2 37.8372445 370.0410836 6.074847 273.46715 3 20.302206 290.0026 -2.101649 112.88488
5 5 1 4.230635 427.294382 112.771237 199.401096 2 15.3735066 376.8945806 104.382371 224.09730 3 8.050933 291.6123 53.660734 270.37200
6 6 1 29.087870 9.330858 129.400932 70.801129 2 38.9966662 421.9258798 -3.891286 290.59259 3 17.919554 581.1735 137.100314 129.78561
7 7 1 4.380303 463.658580 4.120219 56.527016 2 6.0582455 484.4981686 67.820164 72.05615 3 43.556746 170.0745 41.134708 247.99512
I have a major issue with this: as the values of m and n increase, this method becomes long and ugly. Is there a cleaner way to do it, maybe as a one-liner, so I don't have to make, say, 15 subsets if m = 15?
Thanks
You only need your original df1 dataset; then do this:
library(tidyverse)
df1 %>%
group_split(time) %>% # create your subsets and store them as a list of data frames
reduce(left_join, by = "id") # sequentially join those subsets
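If the goal is simply one row per id with each time step's amounts side by side, pivot_wider (loaded with the tidyverse above) is another option; a sketch that encodes the time step in the column names rather than relying on the .x/.y suffixes that merging produces:
pivot_wider(df1,
            id_cols = id,
            names_from = time,
            values_from = starts_with("amount"))
# columns come out as amount1_1, amount1_2, ..., amount4_3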
I have a dataset that I need to split by one variable (Day) and then compare between groups of another variable (Group), computing per-group statistics (e.g. the mean) as well as tests.
Here's an example of what I devised:
require(data.table)
data = data.table(Day = rep(1:10, each = 10),
Group = rep(1:2, times = 50),
V = rnorm(100))
data[, .(g1_mean = mean(.SD[Group == 1]$V),
g2_mean = mean(.SD[Group == 2]$V),
p.value = t.test(V ~ Group, .SD, alternative = "two.sided")$p.value),
by = list(Day)]
Which produces:
Day g1_mean g2_mean p.value
1: 1 0.883406048 0.67177271 0.6674138
2: 2 0.007544956 -0.55609722 0.3948459
3: 3 0.409248637 0.28717183 0.8753213
4: 4 -0.540075365 0.23181458 0.1785854
5: 5 -0.632543900 -1.09965990 0.6457325
6: 6 -0.083221671 -0.96286343 0.2011136
7: 7 -0.044674252 -0.27666473 0.7079499
8: 8 0.260795244 -0.15159164 0.4663712
9: 9 -0.134164758 0.01136245 0.7992453
10: 10 0.496144329 0.76168408 0.1821123
I'm hoping that there's a less roundabout manner of arriving at this result.
A possible compact alternative which can also apply more functions to each group:
DTnew <- dcast(DT[, pval := t.test(V ~ Group, .SD, alternative = "two.sided")$p.value, Day],
Day + pval ~ paste0("g",Group), fun = list(mean,sd), value.var = "V")
which gives:
> DTnew
Day pval V_mean_g1 V_mean_g2 V_sd_g1 V_sd_g2
1: 1 0.4763594 -0.11630634 0.178240714 0.7462975 0.4516087
2: 2 0.5715001 -0.29689807 0.082970631 1.3614177 0.2745783
3: 3 0.2295251 -0.48792449 -0.031328749 0.3723247 0.6703694
4: 4 0.5565573 0.33982242 0.080169698 0.5635136 0.7560959
5: 5 0.5498684 -0.07554433 0.308661427 0.9343230 1.0100788
6: 6 0.4814518 0.57694034 0.885968245 0.6457926 0.6773873
7: 7 0.8053066 0.29845913 0.116217727 0.9541060 1.2782210
8: 8 0.3549573 0.14827289 -0.319017581 0.5328734 0.9036501
9: 9 0.7290625 -0.21589411 -0.005785092 0.9639758 0.8859461
10: 10 0.9899833 0.84034529 0.850429982 0.6645952 1.5809149
A decomposition of the code:
First, a pval variable is added to the dataset with DT[, pval := t.test(V ~ Group, .SD, alternative = "two.sided")$p.value, Day]
Because DT is updated in place and by reference by the previous step, the dcast function can be applied to that directly.
In the casting formula, you specify the variables that should stay as row identifiers on the LHS and the variable whose values should be spread over columns on the RHS.
With the fun argument you can specify which aggregation function has to be applied to the value.var (here V). If multiple aggregation functions are needed, you can specify them in a list (e.g. list(mean, sd)). This can be any type of function, so custom-made functions can be used as well, as in the sketch below.
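For instance, a sketch mixing mean with a custom summary; iqr_range here is a hypothetical helper used only for illustration:
# iqr_range is a made-up helper: the interquartile spread of V within each cell
iqr_range <- function(x) unname(diff(quantile(x, c(0.25, 0.75))))
DTnew2 <- dcast(DT, Day + pval ~ paste0("g", Group),
                fun = list(mean, iqr_range), value.var = "V")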
If you want to remove the V_ from the column names, you can do:
names(DTnew) <- gsub("V_","",names(DTnew))
NOTE: I renamed the data.table to DT because it is often not wise to name your dataset after a function (see ?data).
While not a one-liner, you might consider doing your two processes separately and then merging the results. This prevents you from having to hardcode the group names.
First, we calculate the means:
my_means <- dcast(data[,mean(V), by = .(Day, Group)],
Day~ paste0("Mean_Group", Group),value.var="V1")
Or, in the less convoluted way @Akrun mentioned in the comments, with some added formatting:
my_means <- dcast(Day~paste0("Mean_Group", Group), data=data,
fun.agg=mean, value.var="V")
Then the t-tests:
t_tests <- data[,.(p_value=t.test(V~Group)$p.value), by = Day]
And then merge:
output <- merge(my_means, t_tests)
I am creating correlations using R, with the following code:
Values<-read.csv(inputFile, header = TRUE)
O<-Values$Abundance_O
S<-Values$Abundance_S
cor(O,S)
pear_cor<-round(cor(O,S),4)
outfile<-paste(inputFile, ".jpg", sep = "")
jpeg(filename = outfile, width = 15, height = 10, units = "in", pointsize = 10, quality = 75, bg = "white", res = 300, restoreConsole = TRUE)
rx<-range(0,20000000)
ry<-range(0,200000)
plot(rx,ry, ylab="S", xlab="O", main="O vs S", type="n")
points(O,S, col="black", pch=3, lwd=1)
mtext(sprintf("%s %.4f", "pearson: ", pear_cor), adj=1, padj=0, side = 1, line = 4)
dev.off()
pear_cor
I now need to find the lower quartile for each set of data and exclude the data that falls within it. I would then like to rewrite the data without those values and use the new column of data in the correlation analysis (because I want to threshold the data by the lower quartile). If there is a way to write this so that the threshold is easy to change by passing arguments from Java (as I do with the input file name), that's even better!
Thank you so much.
I have now implemented the answer below and it is working; however, I need to keep the pairs of data together for the correlation. Here is an example of my data (from the csv):
Abundance_O Abundance_S
3635900.752 1390.883073
463299.4622 1470.92626
359101.0482 989.1609251
284966.6421 3248.832403
415283.663 2492.231265
2076456.856 10175.48946
620286.6206 5074.268802
3709754.717 269.6856808
803321.0892 118.2935093
411553.0203 4772.499758
50626.83554 17.29893001
337428.8939 203.3536852
42046.61549 152.1321255
1372013.047 5436.783169
939106.3275 7080.770535
96618.01393 1967.834701
229045.6983 948.3087208
4419414.018 23735.19352
So I need to exclude both values in a row if either one does not meet my quartile threshold (the 0.25 quantile). For example, if the quartile for O were 45000, then the row "42046.61549, 152.1321255" would be removed. Is this possible? If I read in both columns as a data frame, can I test each column separately? Or find the quartiles and then use those values to remove the appropriate rows?
Thanks again, and sorry for the evolution of the question!
Please try to provide a reproducible example. If you have data in a data.frame, you can subset it using the quantile function in a logical test. For instance, in the following data we want to select only the rows of the data frame where the value of the measured variable 'Val' is above the bottom quartile:
# set.seed so you can reproduce these values exactly on your system
set.seed(39856)
df <- data.frame( ID = 1:10 , Val = runif(10) )
df
ID Val
1 1 0.76487516
2 2 0.59755578
3 3 0.94584374
4 4 0.72179297
5 5 0.04513418
6 6 0.95772248
7 7 0.14566118
8 8 0.84898704
9 9 0.07246594
10 10 0.14136138
# Now to select only rows where the value of our measured variable 'Val' is above the bottom 25% quartile
df[ df$Val > quantile(df$Val , 0.25 ) , ]
ID Val
1 1 0.7648752
2 2 0.5975558
3 3 0.9458437
4 4 0.7217930
6 6 0.9577225
7 7 0.1456612
8 8 0.8489870
# And check the value of the bottom 25% quantile...
quantile(df$Val , 0.25 )
25%
0.1424363
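To keep the pairs together, as asked in the update, the same quantile test can be applied to both columns and combined with &, so a whole row is dropped if either value falls in its column's bottom quartile. A sketch assuming the columns are named Abundance_O and Abundance_S as in the posted csv, and (also an assumption) that the threshold is passed to Rscript as a second command-line argument:
args <- commandArgs(trailingOnly = TRUE)
inputFile <- args[1]
threshold <- as.numeric(args[2])          # e.g. 0.25 for the bottom quartile
Values <- read.csv(inputFile, header = TRUE)
# drop a whole row if either value falls in its column's bottom quartile
keep <- Values$Abundance_O > quantile(Values$Abundance_O, threshold) &
        Values$Abundance_S > quantile(Values$Abundance_S, threshold)
Values_filtered <- Values[keep, ]
cor(Values_filtered$Abundance_O, Values_filtered$Abundance_S)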
Although this is an old question, I came across it during research of my own and arrived at a solution that someone may find interesting.
I first defined a function which converts a numeric vector into its quantile groups. The parameter n sets the number of groups (n = 4 for quartiles, n = 10 for deciles).
qgroup = function(numvec, n = 4){
  qtile = quantile(numvec, probs = seq(0, 1, 1/n))
  out = sapply(numvec, function(x) sum(x >= qtile[-(n+1)]))
  return(out)
}
Function example:
v = rep(1:20)
> qgroup(v)
[1] 1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4
Consider now the following data:
dt = data.table(
A0 = runif(100),
A1 = runif(100)
)
We apply qgroup() across the data to obtain two quartile group columns:
cols = colnames(dt)
qcols = c('Q0', 'Q1')
dt[, (qcols) := lapply(.SD, qgroup), .SDcols = cols]
head(dt)
           A0        A1 Q0 Q1
1: 0.72121846 0.1908863 3 1
2: 0.70373594 0.4389152 3 2
3: 0.04604934 0.5301261 1 3
4: 0.10476643 0.1108709 1 1
5: 0.76907762 0.4913463 4 2
6: 0.38265848 0.9291649 2 4
Lastly, we only include rows for which both quartile groups are above the first quartile:
dt = dt[Q0 > 1 & Q1 > 1]
I would like to extract the value of var2 that corresponds to the minimum value of var1 in each building-month combination. Here's my (fake) data set:
head(mydata)
# building month var1 var2
#1 A 1 -26.96333 376.9633
#2 A 1 165.38759 317.3993
#3 A 1 47.46345 271.0137
#4 A 2 73.47784 294.8171
#5 A 2 107.80130 371.7668
#6 A 2 10.16384 308.7975
Reproducible code:
## create fake data set:
set.seed(142)
mydata1 = data.frame(building = rep(LETTERS[1:5],6),month = sort(rep(1:6,5)),var1=rnorm(30,50,35),var2 = runif(30,200,400))
mydata2 = data.frame(building = rep(LETTERS[1:5],6),month = sort(rep(1:6,5)),var1=rnorm(30,60,35),var2 = runif(30,150,400))
mydata3 = data.frame(building = rep(LETTERS[1:5],6),month = sort(rep(1:6,5)),var1=rnorm(30,40,35),var2 = runif(30,250,400))
mydata = rbind(mydata1,mydata2,mydata3)
mydata = mydata[ order(mydata[,"building"], mydata[,"month"]), ]
row.names(mydata) = 1:nrow(mydata)
## here is how I pull the minimum value of v1 for each building-month combination:
require(reshape2)
m1 = melt(mydata, id.var=1:2)
d1 = dcast(m1, building ~ month, function(x) min(max(x,0), na.rm=T),
subset = .(variable == "var1"))
This pulls out the minimum value of var1 for each building-month combo...
head(d1)
# building 1 2 3 4 5 6
#1 A 165.38759 107.80130 93.32816 73.23279 98.55546 107.58780
#2 B 92.08704 98.94959 57.79610 94.10530 80.86883 99.75983
#3 C 93.38284 100.13564 52.26178 62.37837 91.98839 97.44797
#4 D 82.43440 72.43868 66.83636 105.46263 133.02281 94.56457
#5 E 70.09756 61.44406 30.78444 68.24334 94.35605 61.60610
However, what I want is a data frame set up exactly like d1 that instead shows the value of var2 corresponding to the minimum value pulled for var1 (shown in d1 above). My gut tells me it should be a variation on which.min(), but I haven't gotten this to work with dcast() or ddply(). Any help is appreciated!
It may be possible in one step, but I'm more familiar with plyr than reshape2:
dcast(ddply(mydata, .(building, month), summarize, value = var2[which.min(var1)]),
building ~ month)
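The same which.min() idea also translates to data.table, in case you prefer that over plyr; this is a sketch of an equivalent, not part of the original answer:
library(data.table)
# for each building-month, take var2 at the row where var1 is smallest,
# then spread the months across columns
dcast(as.data.table(mydata)[, .(value = var2[which.min(var1)]),
                            by = .(building, month)],
      building ~ month)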