Grouping, comparing, and counting rows in R

I have a data frame that looks like the following:
     system    Id initial final
665       9 16001    6070  6071
683      10 16001    6100  6101
696      11 16001    6101  6113
712      10 16971    6150  6151
715      11 16971    6151  6163
4966      7  4118   10238 10242
5031      9  4118   10260 10278
5088     10  4118   10279 10304
5115     11  4118   10305 10317
structure(list(system = c(9L, 10L, 11L, 10L, 11L, 7L, 9L, 10L,
11L), Id = c(16001L, 16001L, 16001L, 16971L, 16971L, 4118L, 4118L,
4118L, 4118L), initial = c(6070, 6100, 6101, 6150, 6151, 10238,
10260, 10279, 10305), final = c(6071, 6101, 6113, 6151, 6163,
10242, 10278, 10304, 10317)), .Names = c("system", "Id", "initial",
"final"), row.names = c(665L, 683L, 696L, 712L, 715L, 4966L,
5031L, 5088L, 5115L), class = "data.frame")
I would like to get a new data frame with the following structure:
     Id  system length initial final
1 16001 9,10,11      3    6070  6113
2 16971   10,11      2    6150  6163
3  4118       7      1   10238 10242
4  4118 9,10,11      3   10260 10317
structure(list(Id = c(16001L, 16971L, 4118L, 4118L), system = structure(c(3L,
1L, 2L, 3L), .Label = c("10,11", "7", "9,10,11"), class = "factor"),
length = c(3L, 2L, 1L, 3L), initial = c(6070L, 6150L, 10238L,
10260L), final = c(6113, 6163, 10242, 10317)), .Names = c("Id",
"system", "length", "initial", "final"), class = "data.frame", row.names = c(NA,
-4L))
The grouping is by Id and by runs where the difference between adjacent rows in the "system" field equals one. I would also like to get the different "system" values involved in each group and how many of them there are. Finally, I want a column with the first "initial" and the last "final" of each group.
Is it possible to do that in R?
Thanks.

You could use data.table. Convert the "data.frame" to a "data.table" (setDT), create a grouping variable "indx" by taking the difference of adjacent elements of "system" (diff(system)) and applying cumsum to the resulting logical vector, then use "Id" and "indx" as grouping variables to get the statistics.
library(data.table)
setDT(df)[, list(system = toString(system), length = .N, initial = initial[1L],
                 final = final[.N]),
          by = list(Id, indx = cumsum(c(TRUE, diff(system) != 1)))][, indx := NULL][]
# Id system length initial final
#1: 16001 9, 10, 11 3 6070 6113
#2: 16971 10, 11 2 6150 6163
#3: 4118 7 1 10238 10242
#4: 4118 9, 10, 11 3 10260 10317
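As a small illustration of how the grouping index is built (the "system" values below are taken from the example data), each run of consecutive values gets its own index, which combined with "Id" in by= defines the groups:
sys <- c(9, 10, 11, 10, 11, 7, 9, 10, 11)
cumsum(c(TRUE, diff(sys) != 1))
# [1] 1 1 1 2 2 3 4 4 4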
Or, based on @jazzurro's comment about using the first/last functions from dplyr:
library(dplyr)
df %>%
  group_by(indx = cumsum(c(TRUE, diff(system) != 1)), Id) %>%
  summarise(system = toString(system), length = n(),
            initial = first(initial), final = last(final))
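Note that summarise keeps the grouping variables, so the result will still contain the indx helper column; to match the data.table output you could drop it afterwards, for example:
df %>%
  group_by(indx = cumsum(c(TRUE, diff(system) != 1)), Id) %>%
  summarise(system = toString(system), length = n(),
            initial = first(initial), final = last(final)) %>%
  ungroup() %>%
  select(-indx)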

A solution without data.table, using plyr instead:
library(plyr)
func <- function(subdf) {
  # TRUE where the next row's "system" value is one greater (last row padded with TRUE)
  bool <- c(diff(subdf$system), 1) == 1
  ldply(split(subdf, bool), function(u) {
    data.frame(system  = paste(u$system, collapse = ","),
               Id      = unique(u$Id),
               length  = nrow(u),
               initial = head(u, 1)$initial,
               final   = tail(u, 1)$final)
  })
}
ldply(split(df, df$Id), func)
# .id system length Id initial final
#1 FALSE 7 1 4118 10238 10242
#2 TRUE 9,10,11 3 4118 10260 10317
#3 TRUE 9,10,11 3 16001 6070 6113
#4 TRUE 10,11 2 16971 6150 6163
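If you don't need the TRUE/FALSE split label in this result, you can drop the .id column that ldply adds, for example:
res <- ldply(split(df, df$Id), func)
res$.id <- NULL
res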

Related

Filter data frame based on numeric vector with "tolerance"

I would like to filter a data frame using a numeric vector. I am applying the function below:
test_data <- exp_data[exp_data$Size_Change %in% vec_data,]
This is what the example data looks like:
dput(exp_data)
structure(list(Name = c("Mark", "Greg", "Tomas", "Morka", "Pekka",
"Robert", "Tim", "Tom", "Bobby", "Terka"), Mode = c(1, 2, NA,
4, NA, 3, NA, 1, NA, 3), Change = structure(c(6L, 2L, 4L, 5L,
7L, 7L, 7L, 8L, 3L, 1L), .Label = c("D[+58], I[+12][+385]", "C[+58], K[+1206]",
"C[+58], P[+2074]", "C[+58], K[+2172]", "C[+58], K[+259]", "C[+58], K[+2665]",
"C[+58], T[+385]", "C[+58], C[+600]"), class = "factor"), Size = c(1335.261,
697.356, 1251.603, 920.43, 492.236, 393.991, 492.239, 727.696,
1218.933, 495.237), Place = c(3L, 4L, 3L, 2L, 4L, 5L, 4L, 3L,
3L, 4L), Size_Change = c(4004, 2786, 3753, 1840, 1966, 1966,
1966, 2181, 3655, 1978)), row.names = 2049:2058, class = "data.frame")
and vector used for filtering:
dput(vec_data)
c(4003, 2785, 954, 1129, 4013, 756, 1852, 2424, 1954, 246, 147,
234, 562, 1617, 2180, 888, 1176)
I mentioned tolerance because vec_data is not very precise: I am expecting a +1/-1 difference in the numbers, and after applying the function above, rows with such a difference are not kept. It may also happen that the difference is +12/-12 or +24/-24. Can I somehow take this into account while filtering?
Of course one solution would be to do something like vec_data + 1, vec_data - 1, vec_data + 12, etc., do a couple of filtering attempts, and finally rbind all the outputs, but I am looking for a more "elegant" way. It would also be great if a column could be added indicating how each row was filtered: whether it matched an exact number from vec_data or one modified by +1, +12, -24, or whatever. Please take into account that a combination of +1/-1 with any other modification is also possible. The additional column is not necessary if it makes things too complicated.
One option could be (tolerance = 1; here df and vec stand for exp_data and vec_data):
df %>%
  filter(sapply(Size_Change, function(x) any(abs(x - vec) %in% 0:1)))
Name Mode Change Size Place Size_Change
1 Mark 1 C[+58], K[+2665] 1335.261 3 4004
2 Greg 2 C[+58], K[+1206] 697.356 4 2786
3 Tom 1 C[+58], C[+600] 727.696 3 2181
Tolerance = 14:
df %>%
  filter(sapply(Size_Change, function(x) any(abs(x - vec) %in% 0:14)))
Name Mode Change Size Place Size_Change
1 Mark 1 C[+58], K[+2665] 1335.261 3 4004
2 Greg 2 C[+58], K[+1206] 697.356 4 2786
3 Morka 4 C[+58], K[+259] 920.430 2 1840
4 Pekka NA C[+58], T[+385] 492.236 4 1966
5 Robert 3 C[+58], T[+385] 393.991 5 1966
6 Tim NA C[+58], T[+385] 492.239 4 1966
7 Tom 1 C[+58], C[+600] 727.696 3 2181
The same logic with rowwise():
df %>%
  rowwise() %>%
  filter(any(abs(Size_Change - vec) %in% 0:1))
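If you also want the extra column mentioned in the question (how each row was matched), here is a minimal sketch along the same lines; closest_offset is just a made-up helper name, not a dplyr function:
library(dplyr)

# for each value, the signed offset to the closest vec_data entry
# within the tolerance, or NA if nothing is close enough
closest_offset <- function(x, vec, tolerance = 1) {
  sapply(x, function(xi) {
    d <- xi - vec
    d <- d[abs(d) <= tolerance]
    if (length(d) == 0) NA_real_ else d[which.min(abs(d))]
  })
}

exp_data %>%
  mutate(offset = closest_offset(Size_Change, vec_data)) %>%
  filter(!is.na(offset))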
The most obvious approach is to filter based on an inequality rather than an exact match (always recommended when comparing numerics [as opposed to integers]):
comp <- function(x, yvec, tolerance = 1) {
  sapply(x, \(xi) any(abs(xi - yvec) <= tolerance))
}
exp_data[comp(exp_data$Size_Change, vec_data), ]
Name Mode Change Size Place Size_Change
2049 Mark 1 C[+58], K[+2665] 1335.261 3 4004
2050 Greg 2 C[+58], K[+1206] 697.356 4 2786
2056 Tom 1 C[+58], C[+600] 727.696 3 2181
# Tolerance = 2
# exp_data[comp(exp_data$Size_Change, vec_data, 2),]
What about using a tolerance function?
# build a matrix containing x shifted by every offset in -tol..tol
tol <- \(x, tol = 1L) sapply(seq(-tol, tol, 1L), \(i) sweep(as.matrix(x), 1L, i))
exp_data[exp_data$Size_Change %in% tol(vec_data), ]
# Name Mode Change Size Place Size_Change
# 2049 Mark 1 C[+58], K[+2665] 1335.261 3 4004
# 2050 Greg 2 C[+58], K[+1206] 697.356 4 2786
# 2056 Tom 1 C[+58], C[+600] 727.696 3 2181
It defaults to a tolerance of ±1; if we want ±24 we can pass it as an argument:
exp_data[exp_data$Size_Change %in% tol(vec_data, 24L), ]
# Name Mode Change Size Place Size_Change
# 2049 Mark 1 C[+58], K[+2665] 1335.261 3 4004
# 2050 Greg 2 C[+58], K[+1206] 697.356 4 2786
# 2052 Morka 4 C[+58], K[+259] 920.430 2 1840
# 2053 Pekka NA C[+58], T[+385] 492.236 4 1966
# 2054 Robert 3 C[+58], T[+385] 393.991 5 1966
# 2055 Tim NA C[+58], T[+385] 492.239 4 1966
# 2056 Tom 1 C[+58], C[+600] 727.696 3 2181
# 2058 Terka 3 D[+58], I[+12][+385] 495.237 4 1978
If you are wondering about the L in 24L, it is integer notation; you may also use tol=24 without any problems.
Note: R version 4.1.2 (2021-11-01)

Subsetting rows based on multiple columns using data.table - fastest way

I was wondering if there is a more elegant, less clunky and faster way to do this. I have millions of rows with ICD coding for clinical data; a short example is provided below. I want to subset the dataset based on any of the columns matching a specific set of diagnosis codes. The code below works but takes ages in R, and I was wondering if there is a faster way.
structure(list(eid = 1:10, mc1 = structure(c(4L, 3L, 5L, 2L,
1L, 1L, 1L, 1L, 1L, 1L), .Label = c("345", "410", "413.9", "I20.1",
"I23.4"), class = "factor"), oc1 = c(350, 323, 12, 35, 413.1,
345, 345, 345, 345, 345), oc2 = structure(c(5L, 6L, 4L, 1L, 1L,
2L, 2L, 2L, 3L, 2L), .Label = c("", "345", "I20.3", "J23.6",
"K50.1", "K51.4"), class = "factor")), .Names = c("eid", "mc1",
"oc1", "oc2"), class = c("data.table", "data.frame"), row.names = c(NA,
-10L), .internal.selfref = <pointer: 0x102812578>)
The code below subsets all rows that match either "I20" or "413" (this includes all codes that have, for example, been coded as "I20.4" or "413.9", etc.).
dat2 <- dat[substr(dat$mc1, 1, 3) == "413" |
            substr(dat$oc1, 1, 3) == "413" |
            substr(dat$oc2, 1, 3) == "413" |
            substr(dat$mc1, 1, 3) == "I20" |
            substr(dat$oc1, 1, 3) == "I20" |
            substr(dat$oc2, 1, 3) == "I20"]
Is there a faster way to do this? For example, can I loop through each of the columns looking for the specific codes "I20" or "413" and subset those rows?
We can specify the columns of interest in .SDcols, loop through the Subset of Data.table (.SD), get the first 3 characters with substr, check whether they are in a vector of values with %chin% (data.table's fast %in% for character vectors), and Reduce the per-column results with | to a single logical vector for subsetting the rows:
dat[dat[,Reduce(`|`, lapply(.SD, function(x)
substr(x, 1, 3) %chin% c('413', 'I20'))), .SDcols = 2:4]]
# eid mc1 oc1 oc2
#1: 1 I20.1 350.0 K50.1
#2: 2 413.9 323.0 K51.4
#3: 5 345 413.1
#4: 9 345 345.0 I20.3
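For reference, Reduce(`|`, ...) simply ORs the per-column logical vectors element-wise, so a row is kept as soon as any of the three columns matches:
Reduce(`|`, list(c(TRUE, FALSE, FALSE),
                 c(FALSE, FALSE, TRUE),
                 c(FALSE, FALSE, FALSE)))
# [1]  TRUE FALSE  TRUE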
For larger data it could help if we don't check all rows:
minem <- function(dt, colsID = 2:4) {
  cols <- colnames(dt)[colsID]
  x <- c('413', 'I20')
  set(dt, j = "inn", value = FALSE)
  # only rows that have not matched yet are checked against the next column
  for (i in cols) {
    dt[inn == FALSE, inn := substr(get(i), 1, 3) %chin% x]
  }
  dt[inn == TRUE][, inn := NULL][]
}

# build a larger test set by resampling the example data
# ("dts" is the 10-row example data.table from the question)
n <- 1e7
set.seed(13)
dt <- dts[sample(.N, n, replace = TRUE)]
dt <- cbind(dt, dts[sample(.N, n, replace = TRUE), 2:4])
setnames(dt, make.names(colnames(dt), unique = TRUE))
dt
# eid mc1 oc1 oc2 mc1.1 oc1.1 oc2.1
# 1: 8 345 345.0 345 345 345 345
# 2: 3 I23.4 12.0 J23.6 413.9 323 K51.4
# 3: 4 410 35.0 413.9 323 K51.4
# 4: 1 I20.1 350.0 K50.1 I23.4 12 J23.6
# 5: 10 345 345.0 345 345 345 345
# ---
# 9999996: 3 I23.4 12.0 J23.6 I20.1 350 K50.1
# 9999997: 5 345 413.1 I20.1 350 K50.1
# 9999998: 4 410 35.0 345 345 345
# 9999999: 4 410 35.0 410 35
# 10000000: 10 345 345.0 345 345 345 I20.3
# akrun() is assumed to be the first approach above, wrapped in a function
system.time(r1 <- akrun(dt, 2:ncol(dt))) # 22.88 sec
system.time(r2 <- minem(dt, 2:ncol(dt))) # 17.72 sec
all.equal(r1, r2)
# [1] TRUE

Aggregate Dates to produce unique periods

I would like to be able to aggregate survey data collected over a range of days into a unique period. For example, I'd like to combine the first three dates (2015-03-17, 2015-03-23, 2015-03-26) to produce the period "March 2015". I will then use these combined dates to produce boxplots showing "Average.Count" for each period.
In all, I would like to make 4 unique periods:
March 2015 (first 3 dates as per the table below)
September 2015 (dates 4-5 as per the table below)
March 2016 (dates 6-15 as per the table below)
September 2016 (dates 16-23 as per the table below)
Here is the head of the dataset:
head(Survival.Pre.Harvest)
  Bay.Unique       Date Average.Count Total.Predators Time Previous.Average.Count
2          1 2015-03-17         346.9               2    0                     NA
3          1 2015-09-14         326.6               8  181                  346.9
4          1 2016-02-29         322.6               3  349                  326.6
7          2 2015-03-17         326.4               2    0                     NA
8          2 2015-09-14         288.8               4  181                  326.4
9          2 2016-02-29         271.4               6  349                  288.8
These are the unique dates within the dataset.
table(Survival.Pre.Harvest$Date)
2015-03-17 2015-03-23 2015-03-26 2015-09-14 2015-09-15 2016-02-24 2016-02-25 2016-02-26 2016-02-29
9 3 1 9 3 4 6 6 5
2016-03-01 2016-03-02 2016-03-03 2016-03-04 2016-03-22 2016-03-23 2016-09-12 2016-09-13 2016-09-14
3 6 3 6 6 2 6 6 4
2016-09-20 2016-09-22 2016-10-18 2016-10-19 2016-10-20
7 10 4 3 14
Thanks in advance!
dput(head(Survival.Pre.Harvest))
structure(list(Bay.Unique = c(1, 1, 1, 2, 2, 2), Date = structure(c(16511,
16692, 16860, 16511, 16692, 16860), class = "Date"), Average.Count = c(346.9,
326.6, 322.6, 326.4, 288.8, 271.4), Total.Predators = c(2L, 8L,
3L, 2L, 4L, 6L), Time = c(0, 181, 349, 0, 181, 349), Previous.Average.Count = c(NA,
346.9, 326.6, NA, 326.4, 288.8)), .Names = c("Bay.Unique", "Date",
"Average.Count", "Total.Predators", "Time", "Previous.Average.Count"
), row.names = c(2L, 3L, 4L, 7L, 8L, 9L), class = "data.frame")
This should work:
library(lubridate)
library(ggplot2)
Survival.Pre.Harvest$Date <- ymd(Survival.Pre.Harvest$Date)
bks = ymd("2015-01-01", "2015-08-31", "2016-01-01", "2016-08-31", "2017-01-01")
lbs <- c("Mar2015", "Sep2015", "Mar2016", "Sep2016")
Survival.Pre.Harvest$yearmonth <- cut.Date(Survival.Pre.Harvest$Date, breaks = bks, labels = lbs)
ggplot(Survival.Pre.Harvest, aes(x=yearmonth, y=Average.Count)) + geom_boxplot()
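To sanity-check the binning before plotting, you can tabulate the new column; for the six rows in the dput above this gives:
table(Survival.Pre.Harvest$yearmonth)
# Mar2015 Sep2015 Mar2016 Sep2016
#       2       2       2       0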

Two columns' results are coming in the same column in R

I have a data frame with a "sol.grp" (non-numeric) column and an "age" (numeric) column. I'm trying to store the mean of age and the count of observations in two separate columns.
I used the following code:
> summary <- data.frame(aggregate(age~sol.grp, data=na.omit(all.tkts), FUN=function(x) c(mean= mean(x), count=length(x))))
Mean and Count are coming in the same column (shown below).
I do not know what's wrong. Any ideas? Thanks in advance for your help!
Edit: The example dataset is shown at the end.
row sol.grp Mean
1 Account A 187.7154
2 Account B 215.7747
3 WMID 199.0201
4 Qty 254.5545
5 PM 210.7109
6 CS 165.6500
7 ED 158.5483
8 TM 271.1966
9 39.0000
10 131.0000
11 189.0000
12 149.0000
13 3533.0000
14 2.0000
15 338.0000
16 58.0000
Example data: (Top 20 rows)
sol.grp age
Account A 29.6
Account B 29.6
WMID 26.9
Qty 1.7
PM 3.0
CS 2043.8
ED 24.3
TM 24.3
Account A 24.3
Account B 133.3
WMID 27.0
Qty 2.1
PM 29.2
CS 29.4
ED 97.8
TM 192.9
Account A 651.6
Account B 148.6
WMID 125.2
Qty 31.1
You could try this using data.table
library(data.table)
res1 <- setDT(all.tkts)[, list(Mean=mean(age, na.rm=TRUE), Count=.N),
keyby=sol.grp]
The aggregate results did not show any anomaly with the example below:
res2 <- do.call(data.frame,aggregate(age~sol.grp,
data=na.omit(all.tkts), FUN=function(x) c(mean= mean(x), count=length(x))))
res2
# sol.grp age.mean age.count
#1 Account A 235.16667 3
#2 Account B 103.83333 3
#3 CS 1036.60000 2
#4 ED 61.05000 2
#5 PM 16.10000 2
#6 Qty 11.63333 3
#7 TM 108.60000 2
#8 WMID 59.70000 3
data
all.tkts <- structure(list(sol.grp = structure(c(1L, 2L, 8L, 6L, 5L, 3L,
4L, 7L, 1L, 2L, 8L, 6L, 5L, 3L, 4L, 7L, 1L, 2L, 8L, 6L), .Label = c("Account A",
"Account B", "CS", "ED", "PM", "Qty", "TM", "WMID"), class = "factor"),
age = c(29.6, 29.6, 26.9, 1.7, 3, 2043.8, 24.3, 24.3, 24.3,
133.3, 27, 2.1, 29.2, 29.4, 97.8, 192.9, 651.6, 148.6, 125.2,
31.1)), .Names = c("sol.grp", "age"), class = "data.frame", row.names = c(NA,
-20L))
The following, based on your own code, works well:
aggregate(age~sol.grp, data=na.omit(all.tkts), FUN=function(x) c(mean= mean(x), count=length(x)))
sol.grp age.mean age.count
1 Account A 235.16667 3.00000
2 Account B 103.83333 3.00000
3 CS 1036.60000 2.00000
4 ED 61.05000 2.00000
5 PM 16.10000 2.00000
6 Qty 11.63333 3.00000
7 TM 108.60000 2.00000
8 WMID 59.70000 3.00000
Just avoid wrapping aggregate in data.frame(), since aggregate already returns a data.frame.
EDIT:
The details of the output are:
> dd = aggregate(age~sol.grp, data=na.omit(all.tkts), FUN=function(x) c(mean= mean(x), count=length(x)))
> str(dd)
'data.frame': 8 obs. of 2 variables:
$ sol.grp: Factor w/ 8 levels "Account A","Account B",..: 1 2 3 4 5 6 7 8
$ age : num [1:8, 1:2] 235.2 103.8 1036.6 61 16.1 ...
..- attr(*, "dimnames")=List of 2
.. ..$ : NULL
.. ..$ : chr "mean" "count"
>
> dd$sol.grp
[1] Account A Account B CS ED PM Qty TM WMID
Levels: Account A Account B CS ED PM Qty TM WMID
> dd$age
mean count
[1,] 235.16667 3
[2,] 103.83333 3
[3,] 1036.60000 2
[4,] 61.05000 2
[5,] 16.10000 2
[6,] 11.63333 3
[7,] 108.60000 2
[8,] 59.70000 3
>
> dd$age[,2]
[1] 3 3 2 2 2 3 2 3
>
> dd$age[,1]
[1] 235.16667 103.83333 1036.60000 61.05000 16.10000 11.63333 108.60000 59.70000
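As the str() output shows, dd$age is really an 8 x 2 matrix with columns "mean" and "count". If you do want them as two ordinary columns, flatten the result as in the first answer:
do.call(data.frame, dd)
# gives sol.grp, age.mean and age.count as separate columns, matching res2 above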

Number of active items in bins

I have a list of items with 2 dates (start date and end date) and a duration in days (end date - start date). I want to cut them into bins to show the number of "active" items in each bin, i.e. if start date <= bin date and end date > bin date, the item should be counted in that bin.
Item StartDate EndDate Duration
Machine1 2005/01/21 2011/03/29 2258
Machine2 2004/05/12 2012/05/08 2918
Machine3 2004/10/15 2005/09/10 330
Machine4 2004/08/30 2011/08/02 2528
Machine5 2005/06/06 2010/12/03 2006
Machine6 2004/05/11 2007/03/17 1040
Machine7 2005/08/09 2011/05/30 2120
Machine8 2005/01/06 2012/06/07 2709
Machine9 2005/06/13 2008/08/28 1172
Machine10 2005/06/28 2010/04/08 1745
Machine11 2004/11/09 2007/05/14 916
Machine12 2005/05/26 2012/09/16 2670
Machine13 2004/05/28 2009/06/09 1838
Machine14 2005/01/06 2012/05/25 2696
Machine15 2005/08/20 2012/02/11 2366
Machine16 2004/08/02 2011/10/23 2638
Machine17 2004/08/10 2009/03/15 1678
Machine18 2005/05/08 2006/04/17 344
Machine19 2005/08/26 2006/07/24 332
Machine20 2004/03/30 2006/05/07 768
Bin counts that I want to produce:
2004/01/01 0
2005/01/01 9
2006/01/01 19
2007/01/01 16
2008/01/01 14
2009/01/01 13
2010/01/01 11
2011/01/01 9
2012/01/01 5
2013/01/01 0
As you can see, the bin counts do not add up to the total number of items, as they would in a traditional histogram.
I can do this with some verbose code, but I'm sure there must be some short way, using cut or split. I'm aware that the bin labels are off by one according to my definition above, but let's ignore that for now.
A way is:
# turn the date strings into actual Date objects
DF$StartDate <- as.Date(DF$StartDate, "%Y/%m/%d")
DF$EndDate <- as.Date(DF$EndDate, "%Y/%m/%d")
binDF[, 1] <- as.Date(binDF[, 1], "%Y/%m/%d")
# one logical vector over the items per bin date; colSums counts the active items per bin
counts <- colSums(sapply(binDF[, 1], function(x) DF$StartDate <= x & DF$EndDate > x))
#> counts
#[1] 0 9 19 16 14 13 11 9 5 0
And as a complete dataframe:
resDF <- data.frame(dates = binDF[,1], counts = counts, stringsAsFactors = F)
#> resDF
# dates counts
#1 2004-01-01 0
#2 2005-01-01 9
#3 2006-01-01 19
#4 2007-01-01 16
#5 2008-01-01 14
#6 2009-01-01 13
#7 2010-01-01 11
#8 2011-01-01 9
#9 2012-01-01 5
#10 2013-01-01 0
The dataframes DF and binDF:
DF <- structure(list(Item = c("Machine1", "Machine2", "Machine3", "Machine4",
"Machine5", "Machine6", "Machine7", "Machine8", "Machine9", "Machine10",
"Machine11", "Machine12", "Machine13", "Machine14", "Machine15",
"Machine16", "Machine17", "Machine18", "Machine19", "Machine20"
), StartDate = c("2005/01/21", "2004/05/12", "2004/10/15", "2004/08/30",
"2005/06/06", "2004/05/11", "2005/08/09", "2005/01/06", "2005/06/13",
"2005/06/28", "2004/11/09", "2005/05/26", "2004/05/28", "2005/01/06",
"2005/08/20", "2004/08/02", "2004/08/10", "2005/05/08", "2005/08/26",
"2004/03/30"), EndDate = c("2011/03/29", "2012/05/08", "2005/09/10",
"2011/08/02", "2010/12/03", "2007/03/17", "2011/05/30", "2012/06/07",
"2008/08/28", "2010/04/08", "2007/05/14", "2012/09/16", "2009/06/09",
"2012/05/25", "2012/02/11", "2011/10/23", "2009/03/15", "2006/04/17",
"2006/07/24", "2006/05/07"), Duration = c(2258L, 2918L, 330L,
2528L, 2006L, 1040L, 2120L, 2709L, 1172L, 1745L, 916L, 2670L,
1838L, 2696L, 2366L, 2638L, 1678L, 344L, 332L, 768L)), .Names = c("Item",
"StartDate", "EndDate", "Duration"), class = "data.frame", row.names = c(NA,
-20L))
binDF <- structure(list(V1 = c("2004/01/01", "2005/01/01", "2006/01/01",
"2007/01/01", "2008/01/01", "2009/01/01", "2010/01/01", "2011/01/01",
"2012/01/01", "2013/01/01"), V2 = c(0L, 9L, 19L, 16L, 14L, 13L,
11L, 9L, 5L, 0L)), .Names = c("V1", "V2"), class = "data.frame", row.names = c(NA,
-10L))
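For what it's worth, the same comparison can also be written with outer(), which makes the items-by-bins logical matrix explicit; this is only an equivalent sketch of the approach above and assumes the as.Date conversions have already been applied:
active <- outer(seq_len(nrow(DF)), seq_along(binDF[, 1]),
                function(i, j) DF$StartDate[i] <= binDF[j, 1] & DF$EndDate[i] > binDF[j, 1])
colSums(active)
# [1]  0  9 19 16 14 13 11  9  5  0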
