Say I have a data table. I can create a column of lagged values:
>x
date id val valPr
1/4/14 a 1 2
1/3/14 a 2 3
1/2/14 a 3 4
1/1/14 a 4 NA
1/4/14 b 10 20
1/3/14 b 20 30
1/2/14 b 30 40
1/1/14 b 40 NA
Using:
setDT(x)[, valPr := c(val[-1], NA), by = "id"]
Is there a way to do something similar to lag by more than one period? Three for example?
It would produce something like this:
>x
date id val valPr
1/4/14 a 1 4
1/3/14 a 2 NA
1/2/14 a 3 NA
1/1/14 a 4 NA
1/4/14 b 10 40
1/3/14 b 20 NA
1/2/14 b 30 NA
1/1/14 b 40 NA
You could alternatively do the following. lead is a function in dplyr.
setDT(mydf)[, valPr2 := lead(val, 3), by = "id"]
# date id val valPr valPr2
#1: 1/4/14 a 1 2 4
#2: 1/3/14 a 2 3 NA
#3: 1/2/14 a 3 4 NA
#4: 1/1/14 a 4 NA NA
#5: 1/4/14 b 10 20 40
#6: 1/3/14 b 20 30 NA
#7: 1/2/14 b 30 40 NA
#8: 1/1/14 b 40 NA NA
DATA
mydf <- structure(list(date = structure(c(4L, 3L, 2L, 1L, 4L, 3L, 2L,
1L), .Label = c("1/1/14", "1/2/14", "1/3/14", "1/4/14"), class = "factor"),
id = structure(c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L), .Label = c("a",
"b"), class = "factor"), val = c(1L, 2L, 3L, 4L, 10L, 20L,
30L, 40L), valPr = c(2L, 3L, 4L, NA, 20L, 30L, 40L, NA)), .Names = c("date",
"id", "val", "valPr"), class = "data.frame", row.names = c(NA,
-8L))
With data.table, you would do it like this:
nlags = 3
x[, valPr := c(val[-seq(nlags)], rep(NA, nlags)), by = "id"]
This drops the first nlags values of val within each id and then pads with that number of NA values at the end. You can easily adjust it to put the NA padding at the beginning or the end of the series, depending on which direction you want to shift.
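data.table also ships a shift() function that does the same thing directly. A minimal sketch, assuming x is the data.table above (valPr3 is just an illustrative column name):
library(data.table)
# lead val by 3 rows within each id; the last 3 rows per group become NA
x[, valPr3 := shift(val, 3, type = "lead"), by = "id"]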
I have two vectors of id values associated with two different datasets. The two vectors correspond to the same individuals, but the id schemes are unrelated (and there are multiple observations for each individual in each dataset). My goal is to merge the datasets by individual, but because the ids are different there is no way to do that without first matching the ids to each other. There's obviously a lot more data than what I included in the example.
a <- c(4033,4833,681,9567,6175,7112,3889,264,3918,7685)
b <- c(1,4,7,10,14,18,22,26,27,37)
So 4033 corresponds to 1; 4833 to 4; etc.
dummy dataset1:
id day y
1 1 10
1 2 4
1 3 2
4 1 9
4 2 10
4 3 6
dummy dataset2:
id day y1
4033 1 100
4033 1 120
4033 2 150
4033 3 200
4833 1 120
4833 2 100
4833 2 50
4833 3 100
4833 3 200
What I would like is an easy way to get:
dummy dataset1 output:
id day y id.2
1 1 10 4033
1 2 4 4033
1 3 2 4033
4 1 9 4833
4 2 10 4833
4 3 6 4833
I'm trying a solution in a forloop like:
for (i in length(dataset)) {
dataset$id[dataset[[1]] %in% int] <- int1
}
But that's not working correctly (probably for an obvious reason I'm missing).
As we have two vectors, we can easily create a lookup with a named vector in base R:
df1$id.2 <- setNames(a, b)[as.character(df1$id)]
df1
# id day y id.2
#1 1 1 10 4033
#2 1 2 4 4033
#3 1 3 2 4033
#4 4 1 9 4833
#5 4 2 10 4833
#6 4 3 6 4833
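For reference, the lookup vector built by setNames(a, b) maps each id in b (used as names) to the corresponding id in a:
setNames(a, b)
#    1    4    7   10   14   18   22   26   27   37
# 4033 4833  681 9567 6175 7112 3889  264 3918 7685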
Or another base R option is match
df1$id.2 <- a[match(df1$id, b)]
data
df1 <- structure(list(id = c(1L, 1L, 1L, 4L, 4L, 4L), day = c(1L, 2L,
3L, 1L, 2L, 3L), y = c(10L, 4L, 2L, 9L, 10L, 6L)),
class = "data.frame", row.names = c(NA,
-6L))
df2 <- structure(list(id = c(4033L, 4033L, 4033L, 4033L, 4833L, 4833L,
4833L, 4833L, 4833L), day = c(1L, 1L, 2L, 3L, 1L, 2L, 2L, 3L,
3L), y1 = c(100L, 120L, 150L, 200L, 120L, 100L, 50L, 100L, 200L
)), class = "data.frame", row.names = c(NA, -9L))
Another approach is to make a data.frame of the IDs and use merge.
datasetID <- data.frame(id = b, id.2 = a)
merge(dataset1,datasetID)
id day y id.2
1 1 1 10 4033
2 1 2 4 4033
3 1 3 2 4033
4 4 1 9 4833
5 4 2 10 4833
6 4 3 6 4833
Data
a <- c(4033,4833,681,9567,6175,7112,3889,264,3918,7685)
b <- c(1,4,7,10,14,18,22,26,27,37)
dataset1 <- structure(list(id = c(1L, 1L, 1L, 4L, 4L, 4L), day = c(1L, 2L,
3L, 1L, 2L, 3L), y = c(10L, 4L, 2L, 9L, 10L, 6L)), class = "data.frame", row.names = c(NA,
-6L))
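A hedged follow-up sketch (not part of either answer): once dataset1 carries the translated id, the two datasets could be joined, for example on both the translated id and day. Here dataset1b and combined are hypothetical names, and df2 is the second dataset from the earlier dput:
dataset1b <- merge(dataset1, datasetID)   # adds id.2 as above
combined <- merge(dataset1b, df2, by.x = c("id.2", "day"), by.y = c("id", "day"))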
I have data like this:
df <-
a b c
1 2 3
1 2 4
1 2 5
1 2 9
2 3 3
2 3 4
2 3 5
2 3 9
3 4 3
3 4 4
3 4 5
3 4 9
I want to remove duplicate rows based on column a but keep the values in column c, as in:
df2 <-
a b c c1 c2 c3
1 2 3 4 5 9
2 3 3 4 5 9
3 4 3 4 5 9
I know how to remove duplicates, as in:
df2 <- df[!(df$a == "1"), ]
But I have no idea how to add the values to the kept row.
We can exclude 'c' while subsetting the rows of the dataset, unlist, and then concatenate with the whole 'c' column:
c(unlist(df[!duplicated(df$a), 1:2]), c = df$c)
# a b c1 c2 c3 c4
# 1 2 3 4 5 9
If we need the same names as in the expected output:
c(unlist(df[!duplicated(df$a), 1:2]),
setNames(df$c, make.unique(rep('c', nrow(df)), sep="")))
# a b c c1 c2 c3
# 1 2 3 4 5 9
With the new example
library(dplyr)
library(tidyr)
library(stringr)  # for str_c() used below
df2 %>%
group_by(a) %>%
summarise(b = first(b), c = list(as.list(c))) %>%
unnest_wider(c(c))%>%
rename_at(vars(starts_with('.')), ~ str_c('c', seq_along(.)))
# A tibble: 2 x 6
# a b c1 c2 c3 c4
# <int> <int> <int> <int> <int> <int>
#1 1 2 3 4 5 9
#2 2 2 3 4 5 9
Or with the again-updated example (df3):
df3 %>%
group_by(a) %>%
summarise(b = first(b), c = list(as.list(c))) %>%
unnest_wider(c(c))%>%
rename_at(vars(starts_with('.')), ~ str_c('c', seq_along(.)))
# A tibble: 3 x 6
# a b c1 c2 c3 c4
# <int> <int> <int> <int> <int> <int>
#1 1 2 3 4 5 9
#2 2 3 3 4 5 9
#3 3 4 3 4 5 9
Or with data.table
library(data.table)
setDT(df3)[, c(.(b = first(b)),
as.data.frame.list(setNames(c, rep('c', .N)))), a]
# a b c c.1 c.2 c.3
#1: 1 2 3 4 5 9
#2: 2 3 3 4 5 9
#3: 3 4 3 4 5 9
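Another hedged option (not in the original answer): tidyr::pivot_wider can reshape the same data after numbering the 'c' values within each group; note that the columns come out as c1-c4 rather than c, c1, c2, c3. A sketch using df3 from the data below:
library(dplyr)
library(tidyr)
df3 %>%
  group_by(a, b) %>%
  mutate(name = paste0("c", row_number())) %>%
  ungroup() %>%
  pivot_wider(names_from = name, values_from = c)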
data
df <- structure(list(a = c(1L, 1L, 1L, 1L), b = c(2L, 3L, 3L, 4L),
c = c(3L, 4L, 5L, 9L)), class = "data.frame", row.names = c(NA,
-4L))
df2 <- structure(list(a = c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L), b = c(2L,
3L, 3L, 4L, 2L, 3L, 3L, 4L), c = c(3L, 4L, 5L, 9L, 3L, 4L, 5L,
9L)), class = "data.frame", row.names = c(NA, -8L))
df3 <- structure(list(a = c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L,
3L, 3L), b = c(2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L
), c = c(3L, 4L, 5L, 9L, 3L, 4L, 5L, 9L, 3L, 4L, 5L, 9L)), class = "data.frame", row.names = c(NA,
-12L))
Within a group, I want to find the difference between each row and the first row for that user in the data. For example, I need to create the diff variable below. Users have different numbers of rows, as in the following data:
df <- structure(list(ID = c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 3L, 4L, 4L),
money = c(9L, 12L, 13L, 15L, 5L, 7L, 8L, 5L, 2L, 10L), occurence = c(1L,
2L, 3L, 4L, 1L, 2L, 3L, 1L, 1L, 2L), diff = c(NA, 3L, 4L,
6L, NA, 2L, 3L, NA, NA, 8L)), .Names = c("ID", "money", "occurence",
"diff"), class = "data.frame", row.names = c(NA, -10L))
ID money occurence diff
1 1 9 1 NA
2 1 12 2 3
3 1 13 3 4
4 1 15 4 6
5 2 5 1 NA
6 2 7 2 2
7 2 8 3 3
8 3 5 1 NA
9 4 2 1 NA
10 4 10 2 8
You can use ave(). It drops the first value in each group and replaces it with NA, and subtracts the first value from the remaining values.
with(df, ave(money, ID, FUN = function(x) c(NA, x[-1] - x[1])))
# [1] NA 3 4 6 NA 2 3 NA NA 8
A dplyr solution, which uses the first function to get the first value and calculate the difference.
library(dplyr)
df2 <- df %>%
group_by(ID) %>%
mutate(diff = money - first(money)) %>%
mutate(diff = replace(diff, diff == 0, NA)) %>%
ungroup()
df2
# # A tibble: 10 x 4
# ID money occurence diff
# <int> <int> <int> <int>
# 1 1 9 1 NA
# 2 1 12 2 3
# 3 1 13 3 4
# 4 1 15 4 6
# 5 2 5 1 NA
# 6 2 7 2 2
# 7 2 8 3 3
# 8 3 5 1 NA
# 9 4 2 1 NA
# 10 4 10 2 8
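One caveat (an added note, not from the original answer): replace(diff, diff == 0, NA) would also blank out a genuine zero difference later in a group. A sketch that only blanks the first row of each group, using the same packages:
df %>%
  group_by(ID) %>%
  mutate(diff = money - first(money),
         diff = replace(diff, row_number() == 1, NA)) %>%
  ungroup()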
Update
Here is a data.table solution provided by Sotos. Notice that there is no need to replace 0 with NA: the difference is written into money, and the first row of each group simply becomes 0 (the diff column shown below is the expected result carried over from the question's data).
library(data.table)
setDT(df)[, money := money - first(money), by = ID][]
# ID money occurence diff
# 1: 1 0 1 NA
# 2: 1 3 2 3
# 3: 1 4 3 4
# 4: 1 6 4 6
# 5: 2 0 1 NA
# 6: 2 2 2 2
# 7: 2 3 3 3
# 8: 3 0 1 NA
# 9: 4 0 1 NA
# 10: 4 8 2 8
DATA
dput(df)
structure(list(ID = c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 3L, 4L, 4L),
money = c(9L, 12L, 13L, 15L, 5L, 7L, 8L, 5L, 2L, 10L), occurence = c(1L,
2L, 3L, 4L, 1L, 2L, 3L, 1L, 1L, 2L)), .Names = c("ID", "money",
"occurence"), row.names = c(NA, -10L), class = "data.frame")
I have the following data table
PIECE SAMPLE QC_CODE
1 1 1
2 1 NA
3 2 2
4 2 4
5 2 NA
6 3 6
7 3 3
8 3 NA
9 4 6
10 4 NA
and I would like to count the number of qc_code in each sample and return an output like this
SAMPLE SAMPLE_SIZE QC_CODE_COUNT
1 2 1
2 3 2
3 3 2
4 2 1
Where SAMPLE_SIZE is the count of pieces in each sample, and QC_CODE_COUNT is the count of all QC_CODE values that are not NA.
How would I go about this in R?
You can try
library(dplyr)
df1 %>%
group_by(SAMPLE) %>%
summarise(SAMPLE_SIZE=n(), QC_CODE_UNIT= sum(!is.na(QC_CODE)))
# SAMPLE SAMPLE_SIZE QC_CODE_UNIT
#1 1 2 1
#2 2 3 2
#3 3 3 2
#4 4 2 1
Or
library(data.table)
setDT(df1)[,list(SAMPLE_SIZE=.N, QC_CODE_UNIT=sum(!is.na(QC_CODE))), by=SAMPLE]
Or using aggregate from base R
do.call(data.frame,aggregate(QC_CODE~SAMPLE, df1, na.action=NULL,
FUN=function(x) c(SAMPLE_SIZE=length(x), QC_CODE_UNIT= sum(!is.na(x)))))
data
df1 <- structure(list(PIECE = 1:10, SAMPLE = c(1L, 1L, 2L, 2L, 2L, 3L,
3L, 3L, 4L, 4L), QC_CODE = c(1L, NA, 2L, 4L, NA, 6L, 3L, NA,
6L, NA)), .Names = c("PIECE", "SAMPLE", "QC_CODE"), class = "data.frame",
row.names = c(NA, -10L))
I would like to create a subset of the data that consists of Units that have a higher score in QTR 4 than in QTR 1 (upward trend). It doesn't matter whether QTR 2 or 3 are present.
Unit QTR Score
5 4 34
1 1 22
5 3 67
2 4 78
3 2 39
5 2 34
1 2 34
5 1 67
1 3 70
1 4 89
3 4 19
Subset would be:
Unit QTR Score
1 1 22
1 2 34
1 3 70
1 4 89
I've tried variants of something like this:
upward_subset <- subset(mydata,Unit if QTR=4~Score > QTR=1~Score)
Thank you for your time
If the dataframe is named "d", then this succeeds on your test set:
d[ which(d$Unit %in%
     names(which(sapply( split(d, d["Unit"]),
       # TRUE only for Units that have both a QTR 4 and a QTR 1 score, with QTR 4 higher
       function(dd) isTRUE(dd[dd$QTR == 4, "Score"] > dd[dd$QTR == 1, "Score"]) )))) ,
   ]
#-------------
Unit QTR Score
2 1 1 22
7 1 2 34
9 1 3 70
10 1 4 89
An alternative in two steps:
result <- unlist(
by(
test,
test$Unit,
function(x) x$Score[x$QTR == 4] > x$Score[x$QTR == 1])
)
test[test$Unit %in% names(result[result==TRUE]),]
Unit QTR Score
2 1 1 22
7 1 2 34
9 1 3 70
10 1 4 89
A solution using data.table (Probably there are better versions than what I have at the moment).
Note: this assumes the QTR values within a given Unit are unique.
Data:
df <- structure(list(Unit = c(5L, 1L, 5L, 2L, 3L, 5L, 1L, 5L, 1L, 1L,
3L), QTR = c(4L, 1L, 3L, 4L, 2L, 2L, 2L, 1L, 3L, 4L, 4L), Score = c(34L,
22L, 67L, 78L, 39L, 34L, 34L, 67L, 70L, 89L, 19L)), .Names = c("Unit",
"QTR", "Score"), class = "data.frame", row.names = c(NA, -11L
))
Solution:
library(data.table)
dt <- data.table(df, key = c("Unit", "QTR"))
dt[, Score[Score[QTR == 4] > Score[QTR == 1]], by = Unit]
Unit V1
1: 1 22
2: 1 34
3: 1 70
4: 1 89
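If the full rows (including QTR) are wanted rather than just the scores, a hedged sketch along the same lines (not part of the original answer):
# groups where the condition is FALSE return zero rows and are dropped
dt[, .SD[any(QTR == 4) && any(QTR == 1) &&
         Score[QTR == 4] > Score[QTR == 1]], by = Unit]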