I have the following extract from my dataset on the occupancy of football matches:
example <- data.frame(
  Date = c("2019-03-21", "2019-03-30", "2019-04-07",
           "2019-03-21", "2019-03-30", "2019-04-07",
           "2019-03-21", "2019-04-07",
           "2019-03-21", "2019-03-30", "2019-04-07",
           "2019-03-21", "2019-03-30", "2019-04-07",
           "2019-03-21", "2019-03-30", "2019-04-07",
           "2019-03-21", "2019-03-30", "2019-04-07",
           "2019-03-21", "2019-03-30", "2019-04-07",
           "2019-03-21", "2019-03-30",
           "2019-03-21", "2019-03-30",
           "2019-03-21", "2019-03-30",
           "2019-03-21"),
  Block = c("43L", "43L", "43L", "15B", "15B", "15B", "43L", "43L",
            rep("15B", 22)),
  Price = c(24, 35, 30, 35, 45, 40, 26, 30,
            35, 45, 40,
            34, 43, 42,
            35, 42, 45,
            36, 45, 43,
            36, 43, 40,
            35, 41,
            32, 42,
            30, 42,
            35),
  Max = c("3", "3", "3", "10", "10", "10", "3", "3",
          rep("10", 22)),
  Actual = c("2", "1", "2", "10", "9", "6", "2", "2",
             "10", "9", "6",
             "10", "9", "6",
             "10", "9", "6",
             "10", "9", "6",
             "10", "9", "6",
             "10", "9",
             "10", "9",
             "10", "9",
             "10"),
  Temperature = c("15", "20", "18", "15", "20", "18", "15", "18",
                  "15", "20", "18",
                  "15", "20", "18",
                  "15", "20", "18",
                  "15", "20", "18",
                  "15", "20", "18",
                  "15", "20",
                  "15", "20",
                  "15", "20",
                  "15"),
  Placesold = rep("1", 30)
)
In reality, the dataset contains over 100 blocks and 46 different dates.
If you take a closer look at the data, you can see that different numbers of seats were sold in blocks 15B and 43L on the different days:
table(example$Date, example$Block)
             15B 43L
  2019-03-21  10   2
  2019-03-30   9   1
  2019-04-07   6   2

table(example$Placesold)
 1 
30
My goal is to add rows for the seats that were not sold. For those rows, the variable Placesold should be 0 instead of 1, and Price should be the average price of the sold tickets (i.e., the mean over the sold rows only, not including the 0-rows).
To clarify my goal, I have added the missing rows to the reduced dataset by hand:
result <- data.frame(
  Date = rep(c("2019-03-21", "2019-03-30", "2019-04-07"), 13),
  Block = c(rep("43L", 3), rep("15B", 3),
            rep("43L", 3), rep("15B", 3),
            rep("43L", 3), rep("15B", 24)),
  Price = c(24, 35, 30,    35, 45, 40,
            26, 35, 30,    35, 45, 40,
            25, 35, 30,
            34, 43, 42,
            35, 42, 45,
            36, 45, 43,
            36, 43, 40,
            35, 41, 41.67,
            32, 42, 41.67,
            30, 42, 41.67,
            35, 43.11, 41.67),
  Max = c(rep("3", 3), rep("10", 3),
          rep("3", 3), rep("10", 3),
          rep("3", 3), rep("10", 24)),
  Actual = c("2", "1", "2",    "10", "9", "6",
             "2", "1", "2",    "10", "9", "6",
             "2", "1", "2",
             rep(c("10", "9", "6"), 8)),
  Temperature = rep(c("15", "20", "18"), 13),
  Placesold = c("1", "1", "1",    "1", "1", "1",
                "1", "0", "1",    "1", "1", "1",
                "0", "0", "0",
                "1", "1", "1",    "1", "1", "1",
                "1", "1", "1",    "1", "1", "1",
                "1", "1", "0",    "1", "1", "0",
                "1", "1", "0",    "1", "0", "0")
)
The cross-tabulation of dates and blocks, and the counts of the variable Placesold, then look like this:
table(result$Date, result$Block)
             15B 43L
  2019-03-21  10   3
  2019-03-30  10   3
  2019-04-07  10   3

table(result$Placesold)
 0  1 
 9 30
My first thought was to create a matrix with more rows, but to be honest I don't really know how. I hope you can help me.
Thank you very much.
I use dplyr functions together with base::merge; merge can perform a cross join between data frames, vectors, and other types.
First, construct every date-block pair; this includes the blocks with no sales on a given date:
library(dplyr)

# ordered, unique vector of dates
dates <- example$Date %>% unique() %>% sort()
# ordered, unique vector of blocks
blocks <- example$Block %>% unique() %>% sort()
# insert a dummy block to demonstrate the effect of missing blocks
blocks <- c("11B", blocks)

# cross join of dates and blocks: each date paired with each block
# (the result is a data.frame)
eachDateBlock <- merge(dates, blocks, by = NULL)
# merge generates x and y as names for the resulting data.frame;
# rename them in preparation for left_join
eachDateBlock <- eachDateBlock %>% rename(Date = x, Block = y)

# NOTE: Placesold is stored as character; convert it so that the
# if_else() below can fill in a numeric 0 (the other count columns
# would need a similar conversion, which I ignore here)
example$Placesold <- as.double(example$Placesold)

# rows of 'eachDateBlock' with a matching row in 'example' get the
# values of its variables; otherwise they are filled with NAs
extendedData <- eachDateBlock %>%
  left_join(example, by = c("Date" = "Date", "Block" = "Block"))

# overwrite the NAs in the rows of the added unsold blocks
avgPrice <- mean(example$Price)
result <- extendedData %>%
  mutate(
    Price = if_else(is.na(Price), avgPrice, Price),
    Placesold = if_else(is.na(Placesold), 0, Placesold)
  ) %>%
  arrange(Date)
> table(result$Date, result$Block)
             11B 15B 43L
  2019-03-21   1  10   2
  2019-03-30   1   9   1
  2019-04-07   1   6   2
> table(result$Placesold)
 0  1 
 3 30 
> result
         Date Block    Price  Max Actual Temperature Placesold
1  2019-03-21   11B 37.53333 <NA>   <NA>        <NA>         0
...
12 2019-03-21   43L 24.00000    3      2          15         1
13 2019-03-21   43L 26.00000    3      2          15         1
14 2019-03-30   11B 37.53333 <NA>   <NA>        <NA>         0
15 2019-03-30   15B 45.00000   10      9          20         1
...
24 2019-03-30   43L 35.00000    3      1          20         1
25 2019-04-07   11B 37.53333 <NA>   <NA>        <NA>         0
...
31 2019-04-07   15B 40.00000   10      6          18         1
32 2019-04-07   43L 30.00000    3      2          18         1
33 2019-04-07   43L 30.00000    3      2          18         1
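The cross join above only adds one row per date-block pair that is missing entirely. If you also want one row per unsold seat within partially sold blocks, as in your hand-built result, a sketch along the following lines might help (the names example_num, unsold, and perSeat are mine, and it assumes the column types from the question); the per-date/block mean of the sold prices reproduces the 25, 43.11, and 41.67 values in your result:

library(dplyr)
library(tidyr)

# make the count columns numeric so we can compute the seats left over
example_num <- example %>%
  mutate(across(c(Max, Actual, Placesold), as.numeric))

# one row per unsold seat, priced at the mean sold price of its date/block
unsold <- example_num %>%
  group_by(Date, Block, Max, Actual, Temperature) %>%
  summarise(Price = mean(Price), .groups = "drop") %>%  # mean price of sold tickets
  mutate(n_unsold = Max - Actual) %>%
  filter(n_unsold > 0) %>%
  uncount(n_unsold) %>%          # repeat each row (Max - Actual) times
  mutate(Placesold = 0)

perSeat <- bind_rows(example_num, unsold) %>%
  arrange(Date, Block)

Blocks with no sales at all on a date contribute no rows here, so for those you would still combine this with the cross-join idea above.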
I am analyzing patient admission/discharge data from a number of hospitals for various inconsistencies.
My data structure is as follows:
row_id : a unique identifier of a record (used as a foreign key in some other table)
patient_id : unique identifier for a patient
pack_id : the medical package chosen by the patient for treatment
hosp_id : unique identifier for a hospital
admn_date : the date of admission
discharge_date : the date of discharge of the patient
Snapshot of the data:
row_id patient_id pack_id hosp_id admn_date discharge_date
1 1 12 1 01-01-2020 14-01-2020
2 1 62 2 03-01-2020 15-01-2020
3 1 77 1 16-01-2020 27-01-2020
4 1 86 1 18-01-2020 19-01-2020
5 1 20 2 22-01-2020 25-01-2020
6 2 55 3 01-01-2020 14-01-2020
7 2 86 3 03-01-2020 17-01-2020
8 2 72 4 16-01-2020 27-01-2020
9 1 7 1 26-01-2020 30-01-2020
10 3 54 5 14-01-2020 22-01-2020
11 3 75 5 09-02-2020 17-02-2020
12 3 26 6 22-01-2020 05-02-2020
13 4 21 7 14-04-2020 23-04-2020
14 4 12 7 23-04-2020 29-04-2020
15 5 49 8 17-03-2020 26-03-2020
16 5 35 9 27-02-2020 07-03-2020
17 6 51 10 12-04-2020 15-04-2020
18 7 31 11 11-02-2020 17-02-2020
19 8 10 12 07-03-2020 08-03-2020
20 8 54 13 20-03-2020 23-03-2020
A sample dput of the data:
df <- structure(list(row_id = c("1", "2", "3", "4", "5", "6", "7",
"8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18",
"19", "20"), patient_id = c("1", "1", "1", "1", "1", "2", "2",
"2", "1", "3", "3", "3", "4", "4", "5", "5", "6", "7", "8", "8"
), pack_id = c("12", "62", "77", "86", "20", "55", "86", "72",
"7", "54", "75", "26", "21", "12", "49", "35", "51", "31", "10",
"54"), hosp_id = c("1", "2", "1", "1", "2", "3", "3", "4", "1",
"5", "5", "6", "7", "7", "8", "9", "10", "11", "12", "13"), admn_date = structure(c(18262,
18264, 18277, 18279, 18283, 18262, 18264, 18277, 18287, 18275,
18301, 18283, 18366, 18375, 18338, 18319, 18364, 18303, 18328,
18341), class = "Date"), discharge_date = structure(c(18275,
18276, 18288, 18280, 18286, 18275, 18278, 18288, 18291, 18283,
18309, 18297, 18375, 18381, 18347, 18328, 18367, 18309, 18329,
18344), class = "Date")), row.names = c(NA, -20L), class = "data.frame")
I have to identify the records where a patient got admitted without having been discharged from a previous treatment. For this I have used the following code, adapted from the thread "How to know customers who placed next order before delivery/receiving of earlier order? In R":
library(tidyverse)
df %>%
  arrange(patient_id, admn_date, discharge_date) %>%
  mutate(sort_key = row_number()) %>%
  pivot_longer(c(admn_date, discharge_date), names_to = "activity",
               values_to = "date", names_pattern = "(.*)_date") %>%
  mutate(activity = factor(activity, ordered = TRUE,
                           levels = c("admn", "discharge")),
         admitted = ifelse(activity == "admn", 1, -1)) %>%
  group_by(patient_id) %>%
  arrange(date, sort_key, activity, .by_group = TRUE) %>%
  mutate(admitted = cumsum(admitted)) %>%
  ungroup() %>%
  filter(admitted > 1, activity == "admn")
This nicely gives me all the records where patients got admitted without being discharged from a previous treatment.
Output-
# A tibble: 6 x 8
row_id patient_id pack_id hosp_id sort_key activity date admitted
<chr> <chr> <chr> <chr> <int> <ord> <date> <dbl>
1 2 1 62 2 2 admn 2020-01-03 2
2 4 1 86 1 4 admn 2020-01-18 2
3 5 1 20 2 5 admn 2020-01-22 2
4 9 1 7 1 6 admn 2020-01-26 2
5 7 2 86 3 8 admn 2020-01-03 2
6 8 2 72 4 9 admn 2020-01-16 2
Explanation:
Row_id 2 is correct because it overlaps with row_id 1
Row_id 4 is correct because it overlaps with row_id 3
Row_id 5 is correct because it overlaps with row_id 3 (again)
Row_id 9 is correct because it overlaps with row_id 3 (again)
Row_id 7 is correct because it overlaps with row_id 6
Row_id 8 is correct because it overlaps with row_id 7
Now I am stuck at a given validation rule: patients are allowed to take admission in the same hospital any number of times without validating their previous discharge. In other words, I have to extract only those records where a patient got admitted to a different hospital without being discharged from another hospital. If the hospital had to be the same, a group_by on the hosp_id field could have done the work for me, but here the case is the reverse: for the same hosp_id it is allowed, but for a different one it is not.
Please help me with how I may proceed.
If I could map each resulting row_id to the row_id of its overlapping record, maybe we could solve the problem.
Desired output:
row_id
2
5
8
because row_ids 4, 9 and 7 overlap with records having the same hospital id.
Thanks in advance.
P.S. Though a desired solution has been given, I want to know whether it can be done through the map/apply family of functions and/or through the data.table package.
Is this what you're looking for? (Refer to the comments in the code for details. I can provide clarifications if necessary.)
#Your data: `df` as given by the dput in the question (not repeated here)
#Solution
library(dplyr)
library(magrittr)
library(lubridate)

#Convert the patient_id column to numeric
df$patient_id <- as.numeric(df$patient_id)

#Create an empty (well, 1-row) data.frame to collect the output data;
#it needs three additional columns (as indicated)
outdat <- data.frame(matrix(nrow = 1, ncol = 9), stringsAsFactors = FALSE)
names(outdat) <- c(names(df), "ref_discharge_date", "ref_hosp_id", "overlap")

#Logic:
#For each unique patient_id, take all their records.
#For each row of each such set of records, compare its discharge_date
#with the admn_date of all other records whose admn_date >= its own
#admn_date.
#Then register the time interval between this row's discharge_date
#and the compared row's admn_date as a numeric value ("overlap").
#The idea is that concurrent hospital stays will have negative
#overlaps, as the admn_date (of the current stay) will precede the
#discharge_date (of the previous one).
for(i in 1:length(unique(df$patient_id))){
  curdat <- df %>% filter(patient_id == unique(df$patient_id)[i])
  curdat %<>% mutate(admn_date = lubridate::as_date(admn_date),
                     discharge_date = lubridate::as_date(discharge_date))
  curdat %<>% arrange(admn_date)

  for(j in 1:nrow(curdat)){
    currow <- curdat[j, ]
    otrows <- curdat %>% filter(admn_date >= currow$admn_date)

    for(k in 1:nrow(otrows)){
      otrows$ref_discharge_date[k] <- currow$discharge_date
      otrows$ref_hosp_id[k] <- currow$hosp_id
      otrows$overlap[k] <- as.numeric(difftime(otrows$admn_date[k], currow$discharge_date))
    }

    otrows$ref_discharge_date <- as_date(otrows$ref_discharge_date)
    outdat <- bind_rows(outdat, otrows)
  }
}
rm(curdat, i, j, k, otrows, currow)

#Remove that NA row + remove all self-rows
outdat %<>%
  filter(!is.na(patient_id)) %>%
  filter(discharge_date != ref_discharge_date)

#Keep only the negative overlaps
outdat %<>% filter(overlap < 0)

#Keep only those records where the patient
#was admitted to a different hospital
outdat %<>% filter(hosp_id != ref_hosp_id)

outdat
# row_id patient_id pack_id hosp_id admn_date discharge_date ref_discharge_date ref_hosp_id overlap
# 1 2 1 62 2 2020-01-03 2020-01-15 2020-01-14 1 -11
# 2 5 1 20 2 2020-01-22 2020-01-25 2020-01-27 1 -5
# 3 8 2 72 4 2020-01-16 2020-01-27 2020-01-17 3 -1
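Regarding the P.S.: here is a possible data.table sketch (my own addition, not thoroughly tested; the names stays, pairs, ref_row_id and ref_hosp_id are mine). It uses a non-equi self-join to pair every admission with each earlier stay of the same patient that was still open on the admission date, and then keeps only the pairs involving different hospitals:

library(data.table)

dt <- as.data.table(df)

# earlier stays, renamed so that the join conditions are unambiguous
stays <- dt[, .(patient_id, ref_row_id = row_id, ref_hosp_id = hosp_id,
                ref_adm = admn_date, ref_dis = discharge_date)]

# pair each admission (i.*) with every still-open stay (x.*) of the same patient
pairs <- stays[dt,
               on = .(patient_id, ref_adm <= admn_date, ref_dis > admn_date),
               nomatch = 0L, allow.cartesian = TRUE,
               .(row_id = i.row_id, hosp_id = i.hosp_id,
                 ref_row_id = x.ref_row_id, ref_hosp_id = x.ref_hosp_id)]

pairs <- pairs[row_id != ref_row_id]   # drop each stay paired with itself
pairs[hosp_id != ref_hosp_id, sort(unique(as.numeric(row_id)))]
# [1] 2 5 8   (matches the desired output on the sample data)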
Group by patient_id again and then count the distinct hospital IDs; join that back on and filter the data (admitted_not_validated below stands for the tibble produced by your pipeline).
Something like:
admitted_not_validated %>%
  left_join(
    admitted_not_validated %>%
      group_by(patient_id) %>%
      summarize(multi_hosp = length(unique(hosp_id)), .groups = 'drop'),
    by = 'patient_id') %>%
  filter(multi_hosp > 1)
I have the dataframe below:
dput(Moment[1:15,])
structure(list(SectionCut = c("1", "1", "1", "1", "2", "2", "2",
"2", "3", "3", "3", "3", "Left", "Left", "Left"), N_l = c("1",
"2", "3", "4", "1", "2", "3", "4", "1", "2", "3", "4", "1", "2",
"3"), UG = c("84", "84", "84", "84", "84", "84", "84", "84",
"84", "84", "84", "84", "84", "84", "84"), S = c("12", "12",
"12", "12", "12", "12", "12", "12", "12", "12", "12", "12", "12",
"12", "12"), Sample = c("S00", "S00", "S00", "S00", "S00", "S00",
"S00", "S00", "S00", "S00", "S00", "S00", "S00", "S00", "S00"
), DF = c(0.367164093630677, 0.540130283330855, 0.590662743113521,
0.497030982705986, 0.000319303760901125, 0.000504925126205843,
0.00051127115578891, 0.000395434233037301, 0.413218926236695,
0.610726262711904, 0.685000816613652, 0.59474035159783, 0.483354599644366,
0.645710184115934, 0.625883097885242)), row.names = c(NA, -15L
), class = c("tbl_df", "tbl", "data.frame"))
I want to separate the content of the DF column by pivoting on the SectionCut column. I basically want the opposite of pivot_longer, so that in the end the values in column DF appear under five different columns, one per value of SectionCut ("1", "2", "3", "Left", "Right").
We could use pivot_wider from tidyr after creating a sequence column with rowid from data.table:
library(dplyr)
library(tidyr)
library(data.table)

Moment %>%
  mutate(rn = rowid(SectionCut)) %>%
  pivot_wider(names_from = SectionCut, values_from = DF)
Output:
# A tibble: 4 x 9
# N_l UG S Sample rn `1` `2` `3` Left
# <chr> <chr> <chr> <chr> <int> <dbl> <dbl> <dbl> <dbl>
#1 1 84 12 S00 1 0.367 0.000319 0.413 0.483
#2 2 84 12 S00 2 0.540 0.000505 0.611 0.646
#3 3 84 12 S00 3 0.591 0.000511 0.685 0.626
#4 4 84 12 S00 4 0.497 0.000395 0.595 NA
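If you would rather not load data.table just for rowid, the same sequence column can be created with dplyr alone (a sketch on the same Moment data):

library(dplyr)
library(tidyr)

Moment %>%
  group_by(SectionCut) %>%
  mutate(rn = row_number()) %>%  # per-SectionCut counter, like data.table::rowid()
  ungroup() %>%
  pivot_wider(names_from = SectionCut, values_from = DF)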
I'm having trouble finding the mean for a subset of data. Here are the two questions I'm hoping to answer. The first seems to work fine, but the second returns the same answer as the first, only without digits to the right of the decimal point. What's going on?
There is also an error that appears several times:
NAs introduced by coercion
# What is the mean suspension rate for schools by farms overall?
aggregate(suspension_rate_total ~ farms, merged_data,
          FUN = function(suspension_rate_total)
            mean(as.numeric(as.character(suspension_rate_total))))

# What is the mean suspension rate for schools with farms > 100?
aggregate(suspension_rate_total ~ farms, merged_data,
          FUN = function(suspension_rate_total)
            mean(as.numeric(as.character(suspension_rate_total))),
          subset = farms < 100)
Data
merged_data <- structure(list(schid = c("1030642", "1030766", "1030774", "1030840",
"1130103", "1230150", "1530435", "1530492", "1530500", "1931047",
"1931708", "1931864", "1932623", "1933746", "1937226", "1938554",
"1938612", "1938885", "1995836", "1996016"), farms = c("132",
"116", "348", "406", "68", "130", "370", "204", "225", "2,616",
"1,106", "1,918", "1,148", "2,445", "1,123", "1,245", "1,369",
"1,073", "932", "178"), foster = c("2", "0", "1", "8", "1", "4",
"4", "0", "0", "22", "11", "12", "2", "8", "13", "13", "4", "3",
"2", "3"), homeless = c("14", "0", "8", "4", "1", "4", "5", "0",
"14", "35", "42", "116", "9", "8", "34", "54", "26", "31", "5",
"11"), migrant = c("0", "0", "0", "0", "0", "0", "18", "0", "0",
"0", "0", "0", "0", "0", "0", "1", "0", "0", "0", "0"), ell = c("18",
"12", "114", "45", "7", "4", "50", "28", "26", "274", "212",
"325", "95", "112", "232", "185", "121", "84", "24", "35"), suspension_rate_total = c("*",
"20", "0", "0", "95", "5", "*", "256", "78", "33", "20", "1",
"218", "120", "0", "0", "*", "*", "*", "0"), suspension_violent = c("*",
"9", "0", "0", "20", "2", "*", "38", "0", "6", "3", "0", "53",
"35", "0", "0", "*", "*", "*", "0"), suspension_violent_no_injury = c("*",
"6", "0", "0", "47", "1", "*", "121", "52", "7", "13", "1", "77",
"44", "0", "0", "*", "*", "*", "0"), suspension_weapon = c("*",
"0", "0", "0", "8", "0", "*", "1", "0", "1", "1", "0", "4", "3",
"0", "0", "*", "*", "*", "0"), suspension_drug = c("*", "0",
"0", "0", "9", "1", "*", "59", "12", "16", "0", "0", "6", "5",
"0", "0", "*", "*", "*", "0"), suspension_defiance = c("*", "1",
"0", "0", "9", "1", "*", "16", "12", "0", "3", "0", "69", "30",
"0", "0", "*", "*", "*", "0"), suspension_other = c("*", "4",
"0", "0", "2", "0", "*", "21", "2", "3", "0", "0", "9", "3",
"0", "0", "*", "*", "*", "0")), row.names = c(NA, 20L), class = "data.frame")
Thank you so much.
Tidy up your data:
# replace * with NA
merged_data$suspension_rate_total[merged_data$suspension_rate_total == '*'] <- NA
# convert character to numeric format
merged_data$suspension_rate_total <- as.numeric(merged_data$suspension_rate_total)
# remove comma in strings and convert character to numeric format
merged_data$farms <- as.numeric(gsub(",", "", merged_data$farms))
Output
# What is the mean suspension rate for schools by farms overall?
aggregate(suspension_rate_total ~ farms, merged_data, FUN = mean, na.rm = TRUE)
# farms suspension_rate_total
# 1 68 95
# 2 116 20
# 3 130 5
# 4 178 0
# 5 204 256
# 6 225 78
# 7 348 0
# 8 406 0
# 9 1106 20
# 10 1123 0
# 11 1148 218
# 12 1245 0
# 13 1918 1
# 14 2445 120
# 15 2616 33
# What is the mean suspension rate for schools with farms > 100?
aggregate(suspension_rate_total ~ farms, merged_data, FUN = mean, na.rm = TRUE, subset = farms > 100)
# farms suspension_rate_total
# 1 116 20
# 2 130 5
# 3 178 0
# 4 204 256
# 5 225 78
# 6 348 0
# 7 406 0
# 8 1106 20
# 9 1123 0
# 10 1148 218
# 11 1245 0
# 12 1918 1
# 13 2445 120
# 14 2616 33
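As an aside, a sketch of the same clean-up done in one step with dplyr (assuming the same merged_data; merged_clean is my name): every column except schid is a count-like string, so "*" can be turned into NA, the thousands separators stripped, and the result converted to numeric across all of them at once:

library(dplyr)

# "*" -> NA, drop commas, convert to numeric for all columns except schid
merged_clean <- merged_data %>%
  mutate(across(-schid, ~ as.numeric(gsub(",", "", na_if(.x, "*")))))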
Are you sure 'NAs introduced by coercion' is an error and not a warning?
When you convert a character column to numeric with
as.numeric(as.character(suspension_rate_total)), the entries that cannot be parsed (here the "*" values) are coerced to NA, and this is reported through warnings.
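For example, a minimal illustration of that warning:

as.numeric(c("20", "*"))
# [1] 20 NA
# Warning message:
# NAs introduced by coercion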
Also, I get different answers for the two blocks of code:
> aggregate(suspension_rate_total ~ farms, merged_data, FUN = function(suspension_rate_total)
+ mean(as.numeric(as.character(suspension_rate_total))))
farms suspension_rate_total
1 68 95
2 116 20
3 130 5
4 132 NA
5 178 0
6 204 256
7 225 78
8 348 0
9 370 NA
10 406 0
11 932 NA
> aggregate(suspension_rate_total ~ farms, merged_data, FUN = function(suspension_rate_total)
+ mean(as.numeric(as.character(suspension_rate_total))), subset = farms< 100)
farms suspension_rate_total
1 68 95
Further, the comment on your second block of code mentions farms > 100, but in your code you used subset = farms < 100.
I need to select a unique ID.x for each ID.y (forming unique pairs) that minimizes a distance value, starting from the lowest distance values. It feels a bit like a sudoku puzzle, because each x and y can only be used once, so each matched pair constrains the remaining matches.
In the example below, ID.x 55 is a better match for ID.y 1 than ID.x 56 is, because ID.x 56 is a better match for ID.y 2. Similarly, ID.x 58 can be matched to ID.y 4, because any other available option would be a greater distance, and ID.y 5 can then take ID.x 59 at distance 4. However, ID.y 7 cannot be matched, because ID.x 61 and ID.x 62 are equally close.
Example:
DT = data.table(
ID.x = c("55", "55", "55", "55", "55", "55", "55", "56", "56", "56", "56", "56", "56", "56", "57", "57", "57", "57", "57", "57", "57", "58", "58", "58", "58", "58", "58", "58", "59", "59", "59", "59", "59", "59", "59", "60", "60", "60", "60", "60", "60", "60", "61", "61", "61", "61", "61", "61", "61", "62", "62", "62", "62", "62", "62", "62"),
ID.y = c("1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5", "6", "7", "1", "2", "3", "4", "5", "6", "7"),
distance = c("2", "3", "3", "4", "6", "6", "7", "2", "1", "2", "5", "5", "5", "6", "4", "4", "3", "5", "5", "5", "6", "5", "5", "5", "4", "4", "5", "6", "7", "7", "7", "6", "4", "6", "7", "6", "6", "6", "6", "4", "2", "5", "7", "7", "7", "7", "5", "5", "5", "6", "6", "6", "6", "4", "4", "5")
)
Goal:
ID.x ID.y distance
1: 55 1 2
2: 56 2 1
3: 57 3 3
4: 58 4 4
5: 59 5 4
6: 60 6 2
7: NA 7 NA
This first attempt, inspired by this question, does not work:
DT[DT[, .I[distance == min(distance)], by=ID.x]$V1][DT[, .I[1], by = ID.y]$V1]
UPDATE:
In response to the answers by @chinsoon12 and @paweł-chabros, here is an updated data.table that fixes a few things. It swaps x and y (my original question matched x's with y's, but the more natural interpretation is y with x). It also removes the ambiguous match for what is now ID.x 7, which in this example matches ID.y 63 at the lowest remaining distance. Separately, I added a new ID.x 8 to clarify the case where no unambiguous match is possible (it matches ID.y 64 and ID.y 65 equally well). The answer should not select a match arbitrarily.
DT = data.table(
ID.y = c("55", "55", "55", "55", "55", "55", "55", "55", "56", "56", "56", "56", "56", "56", "56", "56", "57", "57", "57", "57", "57", "57", "57", "57", "58", "58", "58", "58", "58", "58", "58", "58", "59", "59", "59", "59", "59", "59", "59", "59", "60", "60", "60", "60", "60", "60", "60", "60", "61", "61", "61", "61", "61", "61", "61", "61", "62", "62", "62", "62", "62", "62", "62", "62", "63", "63", "63", "63", "63", "63", "63", "63", "64", "64", "64", "64", "64", "64", "64", "64", "65", "65", "65", "65", "65", "65", "65", "65"),
ID.x = c("1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8", "1", "2", "3", "4", "5", "6", "7", "8"),
distance = c(2, 3, 3, 4, 6, 6, 7, 15, 2, 1, 2, 5, 5, 5, 6, 15, 4, 4, 3, 5, 5, 5, 6, 15, 5, 5, 5, 4, 4, 5, 6, 15, 7, 7, 7, 6, 4, 6, 7, 15, 6, 6, 6, 6, 4, 2, 5, 15, 7, 7, 7, 7, 5, 5, 6, 15, 6, 6, 6, 6, 4, 4, 10, 15, 11, 11, 11, 11, 11, 11, 5, 12, 11, 11, 11, 11, 11, 11, 11, 1, 11, 11, 11, 11, 11, 11, 11, 1)
)
Expected Result:
ID.y ID.x distance
1: 55 1 2
2: 56 2 1
3: 57 3 3
4: 58 4 4
5: 59 5 4
6: 60 6 2
7: 63 7 5
8: NA 8 NA
I'm using this code to complete a fuzzy join using stringdist_join, as described in this question. I have two datasets that need matching (hence the ID.x and ID.y). In my case, I have pre-test and post-test scores that need to be matched by multiple unreliable characteristics.
It is not clear to me why ID.x 62 with ID.y 7 at distance 5 is not feasible.
Assuming that ID.x 62, ID.y 7, distance 5 is acceptable, a possible approach using data.table:
setorder(DT, distance)
choseny <- c()
ans <- DT[,
          {
            y <- setdiff(ID.y, choseny)[1L]
            choseny <- c(choseny, y)
            .(ID.y = y, dist = .SD[ID.y == y, distance[1L]])
          },
          by = .(ID.x)]
setorder(ans, ID.x)[]
output:
ID.x ID.y dist
1: 55 1 2
2: 56 2 1
3: 57 3 3
4: 58 4 4
5: 59 5 4
6: 60 6 2
7: 61 <NA> <NA>
8: 62 7 5
I am not sure if that's really the desired solution, but it should be helpful. It's not super elegant, but it pretty much matches the desired output.
DT[, .(ID.y,
       distance,
       Row.Num = rank(distance),
       Row.Num.ID = rank(ID.y)),
   by = list(ID.x)
  ][, .SD[Row.Num == min(Row.Num)], by = ID.x
  ][, .SD[Row.Num.ID == min(Row.Num.ID)], by = ID.x]
   ID.x ID.y distance Row.Num Row.Num.ID
1:   55    1        2     1.0          1
2:   56    2        1     1.0          2
3:   57    3        3     1.0          3
4:   58    4        4     1.5          4
5:   59    5        4     1.0          5
6:   60    6        2     1.0          6
7:   61    5        5     2.0          5
8:   62    5        4     1.5          5
I don't know data.table well, so I can only give you a tidyverse solution. But maybe it will help you :)
library(tidyverse)
ID_y <- unique(DT$ID.y)
DT %>%
  as_tibble() %>%
  group_by(ID.x) %>%
  mutate(min_dist = min(distance)) %>%
  arrange(min_dist) %>%
  nest() %>%
  mutate(data = data %>% map(~ {
    min_row <- .x %>%
      filter(ID.y %in% ID_y) %>%
      filter(distance == min(distance)) %>%
      slice(1)
    ID_y <<- ID_y[ID_y != min_row$ID.y]
    min_row
  })) %>%
  unnest() %>%
  select(-min_dist) %>%
  arrange(ID.x)
I save all the unique values of ID.y. Then I calculate the minimum distance for all combinations and arrange by this minimum distance, so that those rows are tackled first in the map loop. After filtering for the minimum distance, I remove the matched ID.y from the vector, so subsequent ID.x values search only among the ID.y values that are left.
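One caveat with this approach: the map() step updates ID_y via <<-, so the pipeline is stateful. Re-initialize ID_y before re-running it:

# ID_y is consumed by the <<- assignment inside map(), so reset it
# before each fresh run of the pipeline above
ID_y <- unique(DT$ID.y)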