Categorizing a dataframe based on information in another dataframe - R

I'm trying to categorize one dataframe based on information in another dataframe. In df1 I have information on the measurement type (e.g. whether a jar contained wet or dry soil and whether the treatment was "None" or "ul5") at a given time. In df2 I have the measured value X at a given time. I need to know the measurement type for every measured value of X.
I have tried full_join() and fill(), but neither gave me my desired outcome. Any ideas?
Here's df1:
df1 <- structure(list(Jar = c("Soil_dry", "Soil_dry", "soil_wet", "soil_wet",
"Soil_dry", "Soil_dry", "soil_wet"), Treatment = c("None", "None",
"None", "None", "ul5", "ul5", "ul5"), Timestamp = structure(c(1608129063,
1608129122, 1608129126, 1608129136, 1608129189, 1608129242, 1608129252
), class = c("POSIXct", "POSIXt"), tzone = "UTC")), class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"), row.names = c(NA, -7L), spec = structure(list(
cols = list(Jar = structure(list(), class = c("collector_character",
"collector")), Treatment = structure(list(), class = c("collector_character",
"collector")), Timestamp = structure(list(format = ""), class = c("collector_datetime",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 1), class = "col_spec"))
df2:
df2 <- structure(list(X = c(5, 3, 34, 4, 65, 9, 7), Timestamp = structure(c(1608129064,
1608129122, 1608129125, 1608129133, 1608129188, 1608129240, 1608129243
), class = c("POSIXct", "POSIXt"), tzone = "UTC")), class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"), row.names = c(NA, -7L), spec = structure(list(
cols = list(X = structure(list(), class = c("collector_double",
"collector")), Timestamp = structure(list(format = ""), class = c("collector_datetime",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 1), class = "col_spec"))
Desired data:
desired_data <- structure(list(X = c(5, 3, 34, 4, 65, 9, 7), Timestamp = structure(c(1608129064,
1608129122, 1608129125, 1608129133, 1608129188, 1608129240, 1608129243
), class = c("POSIXct", "POSIXt"), tzone = "UTC"), Jar = c("Soil_dry",
"Soil_dry", "Soil_dry", "soil_wet", "soil_wet", "Soil_dry", "Soil_dry"
), Treatment = c("None", "None", "None", "None", "None", "ul5",
"ul5")), class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"
), row.names = c(NA, -7L), spec = structure(list(cols = list(
X = structure(list(), class = c("collector_double", "collector"
)), Timestamp = structure(list(format = ""), class = c("collector_datetime",
"collector")), Jar = structure(list(), class = c("collector_character",
"collector")), Treatment = structure(list(), class = c("collector_character",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 1), class = "col_spec"))

Try data.table's rolling join.
library(data.table)
setDT(df1)
setDT(df2)
df1[df2, roll = "nearest", on = "Timestamp"]
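Note that roll = "nearest" matches each row of df2 to whichever df1 Timestamp is closest on either side. If the goal is the most recent df1 entry at or before each measurement (which is what desired_data shows), a rolling join that carries the last df1 observation forward is a minimal sketch of the same idea:
# roll = Inf (equivalently roll = TRUE) rolls the last df1 row forward,
# so each measurement picks up the most recent preceding Jar/Treatment
df1[df2, roll = Inf, on = "Timestamp"]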
If we want to make sure that the Timestamp from df2 is always greater than the matched Timestamp from df1, here is a dplyr alternative:
library(dplyr)
tidyr::crossing(df1 %>% rename(Timestamp1 = Timestamp),
                df2 %>% rename(Timestamp2 = Timestamp)) %>%
  mutate(diff = as.numeric(Timestamp2 - Timestamp1)) %>%
  filter(diff > 0) %>%
  arrange(Jar, Timestamp2, diff) %>%
  group_by(Timestamp2) %>%
  slice(1L) %>%
  ungroup() %>%
  arrange(Timestamp2) %>%
  select(-diff)
# Jar Treatment Timestamp1 X Timestamp2
# <chr> <chr> <dttm> <dbl> <dttm>
#1 Soil_dry None 2020-12-16 14:31:03 5 2020-12-16 14:31:04
#2 Soil_dry None 2020-12-16 14:31:03 3 2020-12-16 14:32:02
#3 Soil_dry None 2020-12-16 14:32:02 34 2020-12-16 14:32:05
#4 Soil_dry None 2020-12-16 14:32:02 4 2020-12-16 14:32:13
#5 Soil_dry None 2020-12-16 14:32:02 65 2020-12-16 14:33:08
#6 Soil_dry ul5 2020-12-16 14:33:09 9 2020-12-16 14:34:00
#7 Soil_dry ul5 2020-12-16 14:34:02 7 2020-12-16 14:34:03


How to join tables on prefix equality?

I have a table with prefixes (here in csv format):
PREFIX,LABEL
A,Infectious diseases
B,Infectious diseases
C,Tumor
D1,Tumor
D2,Tumor
D31,Tumor
D32,Tumor
D33,Blood disorder
D4,Blood disorder
D5,Blood disorder
And I want to join it with this one:
AGE,DEATH_CODE
67,A02
85,D318
75,C007+X
62,D338
Which should obviously give:
AGE,LABEL
67,Infectious diseases
85,Tumor
75,Tumor
62,Blood disorder
I know how to do that with SQL and LIKE but not with tidyverse left_join or base R.
Dput of data
Table 1: CIM_CODES
structure(list(PREFIX = c("A", "B", "C", "D1", "D2", "D31", "D32",
"D33", "D4", "D5"), LABEL = c("Infectious diseases", "Infectious diseases",
"Tumor", "Tumor", "Tumor", "Tumor", "Tumor", "Blood disorder",
"Blood disorder", "Blood disorder")), row.names = c(NA, -10L), spec = structure(list(
cols = list(PREFIX = structure(list(), class = c("collector_character",
"collector")), LABEL = structure(list(), class = c("collector_character",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), delim = ","), class = "col_spec"), problems = <pointer: 0x000002527d306190>, class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"))
Table 2: DEATH_CAUSES
structure(list(AGE = c(67, 85, 75, 62), DEATH_CODE = c("A02",
"D318", "C007+X", "D338")), row.names = c(NA, -4L), spec = structure(list(
cols = list(AGE = structure(list(), class = c("collector_double",
"collector")), DEATH_CODE = structure(list(), class = c("collector_character",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), delim = ","), class = "col_spec"), problems = <pointer: 0x0000025273898c60>, class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"))
You could do a partial string match, picking the label of the prefix with the lowest string distance to each code:
library(tidyverse)
DEATH_CAUSES |>
  mutate(LABEL = map_chr(DEATH_CODE,
                         ~ CIM_CODES$LABEL[
                           which.min(stringdist::stringdist(.x, CIM_CODES$PREFIX))
                         ]))
#> # A tibble: 4 x 3
#> AGE DEATH_CODE LABEL
#> <dbl> <chr> <chr>
#> 1 67 A02 Infectious diseases
#> 2 85 D318 Tumor
#> 3 75 C007+X Tumor
#> 4 62 D338 Blood disorder
UPDATE: not using the stringdist package, as requested.
library(tidyverse)

get_match <- function(code, prefix, target){
  map(code, \(x){
    map(prefix, \(y){
      grepl(paste0("^", y), x)
    })
  }) |>
    map_chr(\(z) target[unlist(z) |> which()])
}
DEATH_CAUSES |>
  mutate(LABEL = get_match(DEATH_CAUSES$DEATH_CODE,
                           CIM_CODES$PREFIX,
                           CIM_CODES$LABEL))
#> # A tibble: 4 x 3
#> AGE DEATH_CODE LABEL
#> <dbl> <chr> <chr>
#> 1 67 A02 Infectious diseases
#> 2 85 D318 Tumor
#> 3 75 C007+X Tumor
#> 4 62 D338 Blood disorder
EDIT: how to do this with a join:
library(tidyverse)
library(fuzzyjoin)
fuzzy_left_join(DEATH_CAUSES,
                CIM_CODES,
                by = c("DEATH_CODE" = "PREFIX"),
                str_detect)
#> # A tibble: 4 x 4
#> AGE DEATH_CODE PREFIX LABEL
#> <dbl> <chr> <chr> <chr>
#> 1 67 A02 A Infectious diseases
#> 2 85 D318 D31 Tumor
#> 3 75 C007+X C Tumor
#> 4 62 D338 D33 Blood disorder
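Note that str_detect(DEATH_CODE, PREFIX) matches the prefix anywhere in the code, which happens to work for this data. To be strict about prefix matching, one possible variant (a sketch using stringr's str_starts(), not part of the original answer) anchors the match to the start of the code:
fuzzy_left_join(DEATH_CAUSES,
                CIM_CODES,
                by = c("DEATH_CODE" = "PREFIX"),
                match_fun = function(code, prefix) str_starts(code, fixed(prefix)))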
My code is below; I used MySQL:
select a.age, p.label
from prefix p
left join age a on a.death_code like CONCAT(p.prefix, "%");
You can refer here: how to use a like with a join in sql?
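Since the question also mentions base R, here is a minimal base R sketch (assuming every DEATH_CODE matches exactly one prefix) using startsWith():
# for each code, find the index of the (first) prefix it starts with
idx <- vapply(DEATH_CAUSES$DEATH_CODE,
              function(code) which(startsWith(code, CIM_CODES$PREFIX))[1],
              integer(1))
DEATH_CAUSES$LABEL <- CIM_CODES$LABEL[idx]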

Structure of a for loop

I am learning how to create a function in R, but I am struggling to understand how to write a for loop. My understanding is that:
for (item in list_items) {
  do_something(item)
}
I would like to write a for loop that replaces the cells equal to 123 with 333. So the item is 123 and the list of items is the data frame columns sec1 through sec4.
Could somebody explain this to me, please? And how can this be included in a function?
Sample code:
structure(list(sec1 = c(1, 123, 1), sec2 = c(123, 1, 1), sec3 = c(123,
0, 0), sec4 = c(1, 123, 1)), spec = structure(list(cols = list(
sec1 = structure(list(), class = c("collector_double", "collector"
)), sec2 = structure(list(), class = c("collector_double",
"collector")), sec3 = structure(list(), class = c("collector_double",
"collector")), sec4 = structure(list(), class = c("collector_double",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), delim = ","), class = "col_spec"), row.names = c(NA,
-3L), class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"))
We do not need a for loop here:
df[df == 123] <- 333
If we really need for loops:
for(i in 1:ncol(df)){
  df[i][df[i] == 123] <- 333
}
output
df
# A tibble: 3 x 4
sec1 sec2 sec3 sec4
<dbl> <dbl> <dbl> <dbl>
1 1 333 333 1
2 333 1 0 333
3 1 1 0 1
Here's how it would work for one column of your data:
dat <- structure(list(sec1 = c(1, 123, 1),
sec2 = c(123, 1, 1),
sec3 = c(123, 0, 0),
sec4 = c(1, 123, 1)),
spec = structure(list(cols = list(
sec1 = structure(list(),
class = c("collector_double", "collector")),
sec2 = structure(list(),
class = c("collector_double","collector")),
sec3 = structure(list(),
class = c("collector_double", "collector")),
sec4 = structure(list(),
class = c("collector_double","collector"))),
default = structure(list(),
class = c("collector_guess","collector")),
delim = ","), class = "col_spec"),
row.names = c(NA,-3L), class =
c("spec_tbl_df", "tbl_df", "tbl", "data.frame"))
for(i in 1:nrow(dat)){
  dat$sec1[i] <- ifelse(dat$sec1[i] == 123, 333, dat$sec1[i])
}
dat
#> sec1 sec2 sec3 sec4
#> 1 1 123 123 1
#> 2 333 1 0 123
#> 3 1 1 0 1
Created on 2022-01-31 by the reprex package (v2.0.1)
To replace all of them, using for loops, you could do a double loop over columns and rows.
for(j in names(dat)){
  for(i in 1:nrow(dat)){
    dat[[j]][i] <- ifelse(dat[[j]][i] == 123, 333, dat[[j]][i])
  }
}
Of course, as others have identified, you certainly don't need a for loop to accomplish this.
In addition to DaveArmstrong's answer, this would work for all rows and columns:
dat <- structure(list(sec1 = c(1, 123, 1), sec2 = c(123, 1, 1), sec3 = c(123,
0, 0), sec4 = c(1, 123, 1)), spec = structure(list(cols = list(
sec1 = structure(list(), class = c("collector_double", "collector"
)), sec2 = structure(list(), class = c("collector_double",
"collector")), sec3 = structure(list(), class = c("collector_double",
"collector")), sec4 = structure(list(), class = c("collector_double",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), delim = ","), class = "col_spec"), row.names = c(NA,
-3L), class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"))
for(i in 1:nrow(dat)){
  for(j in 1:ncol(dat)){
    dat[i, j] <- ifelse(dat[i, j] == 123, 333, dat[i, j])
  }
}
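Since the question also asks how this can be included in a function, here is a minimal sketch (the function name and arguments are purely illustrative):
replace_value <- function(df, old = 123, new = 333) {
  # loop over columns and replace matching cells
  for (col in names(df)) {
    df[[col]][df[[col]] == old] <- new
  }
  df
}
dat <- replace_value(dat, old = 123, new = 333)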

Wide to long without having an X in front of variables

I have my data in the wide format.
What is the easiest way to change it to long without getting an X in front of the time variables?
Sample data:
structure(list(X1 = c("01/12/2019", "02/12/2019"), `00:30` = c(41.95,
39.689), `01:00` = c(44.96, 40.47), `01:30` = c(42.939, 38.95
), `02:00` = c(43.221, 40.46), `02:30` = c(44.439, 41.97)), class = "data.frame", row.names = c(NA,
-2L), spec = structure(list(cols = list(X1 = structure(list(), class = c("collector_character",
"collector")), `00:30` = structure(list(), class = c("collector_double",
"collector")), `01:00` = structure(list(), class = c("collector_double",
"collector")), `01:30` = structure(list(), class = c("collector_double",
"collector")), `02:00` = structure(list(), class = c("collector_double",
"collector")), `02:30` = structure(list(), class = c("collector_double",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 1L), class = "col_spec"))
With pivot_longer and pivot_wider from tidyr:
dat |>
  pivot_longer(names_to = "time", values_to = "val", `00:30`:`02:30`) |>
  pivot_wider(names_from = "X1", values_from = "val")
Output:
# A tibble: 5 x 3
time `01/12/2019` `02/12/2019`
<chr> <dbl> <dbl>
1 00:30 42.0 39.7
2 01:00 45.0 40.5
3 01:30 42.9 39.0
4 02:00 43.2 40.5
5 02:30 44.4 42.0
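If a genuinely long format is wanted (one row per date/time pair) rather than the transposed table above, pivot_longer() on its own is enough; the backticked column names are kept verbatim, so no X is added. A minimal sketch:
dat |>
  pivot_longer(-X1, names_to = "time", values_to = "val") |>
  rename(date = X1)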
In this special case, you could transpose the part of your data.frame containing numbers and assign the column names:
df_new <- data.frame(t(df[,-1]))
colnames(df_new) <- df[, 1]
This returns a data.frame df_new:
01/12/2019 02/12/2019
00:30 41.950 39.689
01:00 44.960 40.470
01:30 42.939 38.950
02:00 43.221 40.460
02:30 44.439 41.970
Edit (Thanks to jay.sf)
For versions of R >= 4.1, you could use the natural pipe:
t(df[, -1]) |>
  data.frame() |>
  `colnames<-`(df[, 1])

How to join tables without the new (unique) values being added to the bottom in R? full_join is adding new values to the bottom

I need a chart of accounts to stay in order when new accounts are added or dropped in future years. This is because in accounting the accounts are sorted by type (for example Asset, Liability, Equity), but that is not explicit in the dataset. This is an example of the code that is putting the new "Accounts" from Year2 and Year3 at the bottom.
XYZCompany_Consolidated <- XYZCompany_Year1 %>%
  full_join(XYZCompany_Year2, by = "Account") %>%
  full_join(XYZCompany_Year3, by = "Account")
Example: This picture is just to give a simplified example. The highlight in orange is where the new accounts are going, to the right is the code I'm using, and the green is what I'm trying to achieve.
Perhaps I'm overthinking this problem but I find it hard to solve. Let's define some data first:
df_year1 <- structure(list(Account = c("Cash", "Accounts", "Loan1", "Auto",
"JaneDoe"), Year_1 = c(100, 1000, 20, 300, 500)), class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"), row.names = c(NA, -5L), spec = structure(list(
cols = list(Account = structure(list(), class = c("collector_character",
"collector")), Year_1 = structure(list(), class = c("collector_double",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 1L), class = "col_spec"))
df_year2 <- structure(list(Account = c("Cash", "Accounts", "Loan1", "Auto",
"Laptop", "JaneDoe"), Year_2 = c(80, 1200, 50, 300, 500, 0)), class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"), row.names = c(NA, -6L), spec = structure(list(
cols = list(Account = structure(list(), class = c("collector_character",
"collector")), Year_2 = structure(list(), class = c("collector_double",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 1L), class = "col_spec"))
df_year3 <- structure(list(Account = c("Cash", "Accounts", "Loan1", "Auto",
"Rent", "JaneDoe"), Year_3 = c(80, 1200, 50, 300, 1000, 0)), class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"), row.names = c(NA, -6L), spec = structure(list(
cols = list(Account = structure(list(), class = c("collector_character",
"collector")), Year_3 = structure(list(), class = c("collector_double",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 1L), class = "col_spec"))
Those are similar to the data shown in the OP's picture, e.g. df_year1 looks like
# A tibble: 5 x 2
Account Year_1
<chr> <dbl>
1 Cash 100
2 Accounts 1000
3 Loan1 20
4 Auto 300
5 JaneDoe 500
Next we transform those data a little bit, namely
library(dplyr)
library(tidyr)

df_y1 <- df_year1 %>%
  mutate(Year = 1,
         no = row_number()) %>%
  rename(value = Year_1)
which returns
# A tibble: 5 x 4
Account value Year no
<chr> <dbl> <dbl> <int>
1 Cash 100 1 1
2 Accounts 1000 1 2
3 Loan1 20 1 3
4 Auto 300 1 4
5 JaneDoe 500 1 5
The new column no stores the account's original position, column Year stores the chart's year. All three data.frames are processed like this, so we get df_y1, df_y2, df_y3.
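To avoid repeating that block three times, the same transformation could be applied to all three data.frames in one go, for example with purrr (a sketch; it assumes the value columns follow the Year_<i> naming pattern):
library(purrr)
dfs <- list(df_year1, df_year2, df_year3)
long_list <- imap(dfs, \(df, i) {
  df %>%
    rename(value = all_of(paste0("Year_", i))) %>%
    mutate(Year = i, no = row_number())
})
df_y1 <- long_list[[1]]
df_y2 <- long_list[[2]]
df_y3 <- long_list[[3]]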
Finally we bind them together
bind_rows(df_y1, df_y2, df_y3) %>%
  mutate(num_years = max(Year)) %>%
  group_by(Account) %>%
  mutate(rank = sum((num_years - n() + 1) * no), .keep = "unused") %>%
  pivot_wider(names_from = Year, names_prefix = "Year_") %>%
  arrange(rank) %>%
  select(-rank) %>%
  ungroup()
and calculate a rank for each account. The accounts are ordered by this rank. As a result, we get
# A tibble: 7 x 4
Account Year_1 Year_2 Year_3
<chr> <dbl> <dbl> <dbl>
1 Cash 100 80 80
2 Accounts 1000 1200 1200
3 Loan1 20 50 50
4 Auto 300 300 300
5 Laptop NA 500 NA
6 Rent NA NA 1000
7 JaneDoe 500 0 0
Note
I believe there are better approaches, but at least this works for the example data.
I'm not sure about the calculated rank's stability, so take care.

Create a new variable based on other variables in a time series using R

All,
I want to quantify the operation time of a remote sensor by determining whether the sensor generated a value within a set time period (2 hours), which would indicate whether the sensor was functioning during that time. My dataframe has a datetime variable formatted as Y-M-D H:M:S (example: 2020-04-06 09:50:00) and one site variable (with 6 different sites) for which I want to evaluate the operation time.
All help is appreciated.
Edit:
Here is the dput of the head of my data. I'm not sure if this is how I am supposed to provide it.
structure(list(datetime = structure(c(1564618522, 1564618874, 1564618933,
1564618994, 1564619054, 1564622122), class = c("POSIXct",
"POSIXt"), tzone = "UTC"), fracsec = c(0.75, 0.33, 0.57, 0.1,
0.07, 0.95), duration = c(NA_real_, NA_real_, NA_real_, NA_real_,
NA_real_, NA_real_), tagtype = c(NA_character_, NA_character_,
NA_character_, NA_character_, NA_character_, NA_character_),
PITnum = c("999000000007426", "985121002397230", "985121002397230",
"985121002397230", "985121002397230", "999000000007426"),
consdetc = c(NA_integer_, NA_integer_, NA_integer_, NA_integer_,
NA_integer_, NA_integer_), arrint = c(NA_integer_, NA_integer_,
NA_integer_, NA_integer_, NA_integer_, NA_integer_), site = c("DSDS",
"DSDS", "DSDS", "DSDS", "DSDS", "DSDS"), manuf = c("Biomark",
"Biomark", "Biomark", "Biomark", "Biomark", "Biomark"), srcfile = c("C:\\Users\\jrjohnson\\Documents\\MoraPIT\\julyAllArraysAWformat\\dsds\\Archive\\2020-04-01_DSDS_08092019.txt",
"C:\\Users\\jrjohnson\\Documents\\MoraPIT\\julyAllArraysAWformat\\dsds\\Archive\\2020-04-01_DSDS_08092019.txt",
"C:\\Users\\jrjohnson\\Documents\\MoraPIT\\julyAllArraysAWformat\\dsds\\Archive\\2020-04-01_DSDS_08092019.txt",
"C:\\Users\\jrjohnson\\Documents\\MoraPIT\\julyAllArraysAWformat\\dsds\\Archive\\2020-04-01_DSDS_08092019.txt",
"C:\\Users\\jrjohnson\\Documents\\MoraPIT\\julyAllArraysAWformat\\dsds\\Archive\\2020-04-01_DSDS_08092019.txt",
"C:\\Users\\jrjohnson\\Documents\\MoraPIT\\julyAllArraysAWformat\\dsds\\Archive\\2020-04-01_DSDS_08092019.txt"
), srcline = 21:26, compdate = structure(c(18353, 18353,
18353, 18353, 18353, 18353), class = "Date")), spec = structure(list(
cols = list(datetime = structure(list(format = ""), class =
c("collector_datetime",
"collector")), fracsec = structure(list(), class = c("collector_double",
"collector")), duration = structure(list(), class = c("collector_double",
"collector")), tagtype = structure(list(), class =
c("collector_character",
"collector")), PITnum = structure(list(), class = c("collector_character",
"collector")), consdetc = structure(list(), class = c("collector_integer",
"collector")), arrint = structure(list(), class = c("collector_integer",
"collector")), site = structure(list(), class = c("collector_character",
"collector")), manuf = structure(list(), class = c("collector_character",
"collector")), srcfile = structure(list(), class =
c("collector_character",
"collector")), srcline = structure(list(), class = c("collector_integer",
"collector")), compdate = structure(list(format = "%Y-%m-%d"), class =
c("collector_date",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), skip = 0), class = "col_spec"), row.names = 23803:23808,
class = "data.frame")
Here's a way to do this using the := operator in data.table:
Sample data
library(data.table)

time_threshold <- Sys.time() + 180

dat <- data.table(
  time = seq.POSIXt(from = Sys.time(), by = 60, length.out = 10),
  value = rnorm(n = 10, mean = 10, sd = 2)
)
Code
To add a new variable based on time and value columns:
> time_threshold
[1] "2020-04-06 13:08:42 EDT"
> dat
time value
1: 2020-04-06 13:05:42 8.240336
2: 2020-04-06 13:06:42 9.744952
3: 2020-04-06 13:07:42 6.984802
4: 2020-04-06 13:08:42 8.015951
5: 2020-04-06 13:09:42 13.435096
6: 2020-04-06 13:10:42 10.835025
7: 2020-04-06 13:11:42 7.216484
8: 2020-04-06 13:12:42 9.559917
9: 2020-04-06 13:13:42 8.320369
10: 2020-04-06 13:14:42 13.201530
> dat[ time >= time_threshold & value >= 10, new_variable := 1]
> dat
time value new_variable
1: 2020-04-06 13:05:42 8.240336 NA
2: 2020-04-06 13:06:42 9.744952 NA
3: 2020-04-06 13:07:42 6.984802 NA
4: 2020-04-06 13:08:42 8.015951 NA
5: 2020-04-06 13:09:42 13.435096 1
6: 2020-04-06 13:10:42 10.835025 1
7: 2020-04-06 13:11:42 7.216484 NA
8: 2020-04-06 13:12:42 9.559917 NA
9: 2020-04-06 13:13:42 8.320369 NA
10: 2020-04-06 13:14:42 13.201530 1
You could also look at the mutate() option in dplyr.
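Coming back to the original question (whether each sensor produced at least one value in every 2-hour window), a rough data.table sketch might look like the following. It assumes the dput above is stored in a data.table called df, and it treats "operating" as "at least one detection in a 2-hour bucket":
setDT(df)
# bucket each detection into a 2-hour window (7200 seconds)
df[, window := as.POSIXct(floor(as.numeric(datetime) / 7200) * 7200,
                          origin = "1970-01-01", tz = "UTC")]
# one row per site and 2-hour window in which the sensor produced data
uptime <- df[, .(n_detections = .N), by = .(site, window)]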
