Subset dataframe based on Values in second dataframe - r

I have one dataframe, df1, and I am working with two of its columns:
> head(df1[,c(10,11)])
ColA ColB
1 12 20
2 7 5
3 32 38
4 37 46
5 15 15
6 4 4
I have a second dataframe with 2 columns of the same names, but it contains only a single row of values:
> head(df2)
ColA ColB
1 50 30
I want to subset values from df1 based on the value in the corresponding column of df2. Doing this manually would look like this:
colA_vector <- df1[df1$ColA < 50,]
colB_vector <- df1[df1$ColB < 30,]
How can I do this in a more general-purpose way? I do not want to hardcode anything: the column names "ColA" and "ColB" could be anything, so solutions that rely on those specific names won't work.
Thank you.

In base R we could do:
nms <- intersect(names(df1), names(df2))
df1[do.call(`&`, Map(`<`, df1[nms], df2[nms])),]
# ColA ColB
# 1 12 20
# 2 7 5
# 5 15 15
# 6 4 4
Or just df1[do.call('&', Map('<', df1, df2)),] if both data.frames have the same order of columns and same names.
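The same idea can be wrapped in a small helper so the comparison operator becomes a parameter; a minimal sketch (subset_by_thresholds is a hypothetical name, not from the answer above):
subset_by_thresholds <- function(x, y, op = `<`) {
  # compare each shared column of x against the single value in y, then AND the masks
  nms <- intersect(names(x), names(y))
  x[Reduce(`&`, Map(op, x[nms], y[nms])), , drop = FALSE]
}
subset_by_thresholds(df1, df2)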
Using the package fuzzyjoin might be more readable however:
library(fuzzyjoin)
fuzzy_semi_join(df1, df2, match_fun = `<`)
# ColA ColB
# 1 12 20
# 2 7 5
# 5 15 15
# 6 4 4
data
df1 <- read.table(text="
ColA ColB
1 12 20
2 7 5
3 32 38
4 37 46
5 15 15
6 4 4", header = TRUE, stringsAsFactors = FALSE)
df2 <- read.table(text="ColA ColB
1 50 30", header = TRUE, stringsAsFactors = FALSE)

Create a function if we want to do the same task repeatedly:
f1 <- function(dat1, dat2, colName) {
  dat1[dat1[[colName]] < dat2[[colName]], ]
}
f1(df1, df2, "ColA")
# ColA ColB
#1 12 20
#2 7 5
#3 32 38
#4 37 46
#5 15 15
#6 4 4
f1(df1, df2, "ColB")
# ColA ColB
#1 12 20
#2 7 5
#5 15 15
#6 4 4
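If we do not want to type each column name, a minimal sketch that applies f1 to every column shared by both data frames (result_list is a hypothetical name):
# loop over the shared column names and collect one filtered data frame per column
shared <- intersect(names(df1), names(df2))
result_list <- lapply(shared, function(nm) f1(df1, df2, nm))
names(result_list) <- shared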
data
df1 <- structure(list(ColA = c(12L, 7L, 32L, 37L, 15L, 4L), ColB = c(20L,
5L, 38L, 46L, 15L, 4L)), class = "data.frame", row.names = c(NA,
-6L))
df2 <- structure(list(ColA = 50L, ColB = 30L),
class = "data.frame", row.names = "1")

Using dplyr:
library(dplyr)
df1 %>%
filter(df1[,1] < df2[,1])
ColA ColB
1 12 20
2 7 5
3 32 38
4 37 46
5 15 15
6 4 4
df1 %>%
filter(df1[,2] < df2[,2])
ColA ColB
1 12 20
2 7 5
3 15 15
4 4 4
Subsetting based on both columns simultaneously:
df1 %>%
filter(df1[,1] < df2[,1] & df1[,2] < df2[,2])
ColA ColB
1 12 20
2 7 5
3 15 15
4 4 4

If you don't want to use the fuzzyjoin package or write your own function, you can just repeat the second dataframe.
df1 <- data.frame("ColA" = c(12, 7, 32),
"ColB" = c(20, 5, 38))
df2 <- data.frame("ColA" = 50,
"ColB" = 30)
n <- nrow(df1)
df2_new <- do.call("rbind", replicate(n, df2, simplify = FALSE))
df1_which <- as.data.frame(df1 < df2_new)
colA_vector <- df1[df1_which$ColA, "ColA"]
colB_vector <- df1[df1_which$ColB, "ColB"]
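If, as in the question, you want whole rows of df1 where every column passes its threshold, a minimal sketch building on df1_which:
# a row is kept only if every column's comparison is TRUE
keep <- rowSums(df1_which) == ncol(df1)
df1[keep, ]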

You can try a tidyverse function. The result is a list of the filtered data.frames.
foo <- function(x, y, ColA, ColB){
  require(tidyverse)
  var1 <- quo_name(ColA)
  var2 <- quo_name(ColB)
  x %>%
    select(a = !!var1, b = !!var2) %>%
    mutate(colA_vector = a < y[[ColA]]) %>%
    mutate(colB_vector = b < y[[ColB]]) %>%
    gather(k, v, -a, -b) %>%
    filter(v) %>%
    split(.$k) %>%
    map(~ select(., -v, -k))
}
foo(df1, df2, "ColA", "ColB")
$colA_vector
a b
1 12 20
2 7 5
3 32 38
4 37 46
5 15 15
6 4 4
$colB_vector
a b
7 12 20
8 7 5
9 15 15
10 4 4

Related

Add a column from one dataframe in a list to another dataframe in another list with map2

I have two lists with different dataframes (original data has 70 dataframes for each list, totalising 2 million rows).
df1 <- data.frame(colA = LETTERS[1:5], colB = seq(1,5))
df2 <- data.frame(colA = LETTERS[6:10], colB = seq(6,10))
list1 <- list(df1,df2)
df3 <- data.frame(colA = LETTERS[1:5], colB = seq(11,15))
df4 <- data.frame(colA = LETTERS[6:10], colB = seq(16,20))
list2 <- list(df3,df4)
I want either to create a new list (e.g., desired_list) or to update the existing list1, adding colB from each dataframe of list2 (df3 and df4), naming the new column accordingly and renaming the old ones.
list1 and list2 have the same number of dataframes, and each pair has the same number of rows (df1 has the same number of rows as df3, df2 as df4, and so on). The desired output should look like this.
desired_df1 <- data.frame(colA = LETTERS[1:5], colB_1 = seq(1,5), colB_2 = seq(11,15))
desired_df2 <- data.frame(colA = LETTERS[6:10], colB_1 = seq(6,10), colB_2 = seq(16,20))
desired_list <- list(desired_df1,desired_df2)
I think I can do this with purrr::map2, but I'm not very familiar with lists and I am having difficulties with map2 formatting and indexing. So far I tried:
desired_list <- lapply(list1,
function(df) {purrr::map2(.x = list1, .y = list2, .f = list1$colB_2 <- list2$colB)})
But I get:
Error in `as_mapper()`:
! Can't convert `.f`, NULL, to a function.
Something like the following:
map2(list1, list2,
~data.frame(colA = .x$colA,
colB_1 = .x$colB,
colB_2 = .y$colB))
##> [[1]]
##> colA colB_1 colB_2
##> 1 A 1 11
##> 2 B 2 12
##> 3 C 3 13
##> 4 D 4 14
##> 5 E 5 15
##>
##> [[2]]
##> colA colB_1 colB_2
##> 1 F 6 16
##> 2 G 7 17
##> 3 H 8 18
##> 4 I 9 19
##> 5 J 10 20
You can use plain old Map for this:
Map(function(a, b) { a$colB_2 <- b$colB; a }, list1, list2)
#> [[1]]
#> colA colB colB_2
#> 1 A 1 11
#> 2 B 2 12
#> 3 C 3 13
#> 4 D 4 14
#> 5 E 5 15
#>
#> [[2]]
#> colA colB colB_2
#> 1 F 6 16
#> 2 G 7 17
#> 3 H 8 18
#> 4 I 9 19
#> 5 J 10 20
Or we may also extract colB from list2 and transform list1:
Map(transform, list1, colB_2 = lapply(list2, `[[`, "colB"))
-output
[[1]]
colA colB colB_2
1 A 1 11
2 B 2 12
3 C 3 13
4 D 4 14
5 E 5 15
[[2]]
colA colB colB_2
1 F 6 16
2 G 7 17
3 H 8 18
4 I 9 19
5 J 10 20
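As an aside, a purrr/dplyr variant of the same idea that also renames the original column to colB_1, matching the desired output; a sketch, assuming the column is literally named colB:
library(purrr)
library(dplyr)
map2(list1, list2, ~ .x %>%
  rename(colB_1 = colB) %>%      # rename the existing column
  mutate(colB_2 = .y$colB))      # append the matching column from list2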

Simultaneously remove the first and last rows of a data frame until reaching a row that does not have an NA

I have a dataframe that contains NA values, and I want to remove some rows that have an NA (i.e., not complete cases). However, I only want to remove rows at the beginning and ending of the dataframe. So, I want to keep any rows that have an NA that are not in the first or last rows of the dataframe. What is the most efficient way to simultaneously remove these rows with NAs without using a row index? This is related to my previous question, but I also want to remove the first rows at the same time. There are other posts that also focus on removing only the first rows, but not both.
Data
df <- structure(list(var1 = 1:15,
var2 = c(3, NA, 3, NA, 2, NA, 3, 4, 2, NA, 4, 2, 45, 2, 1),
var3 = c(6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 16L, NA, NA, NA, NA),
var4 = c(NA, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, 19L, NA)),
class = "data.frame", row.names = c(NA, -15L))
Expected Output
So, in this example, I removed rows 1 to 2 and 12 to 15 since they have an NA, while rows 3 and 11 do not have an NA.
var1 var2 var3 var4
1 3 3 8 8
2 4 NA 9 9
3 5 2 10 10
4 6 NA 11 11
5 7 3 12 12
6 8 4 13 13
7 9 2 14 14
8 10 NA 15 15
9 11 4 16 16
I know that I could have 2 statements in filter to remove the top and bottom rows (shown below). But I'm wondering if there is a more efficient way to do this with really large datasets (open to any method tidyverse, base R, data.table, etc.).
library(dplyr)
df %>%
filter(cumsum(complete.cases(.)) != 0 &
rev(cumsum(rev(complete.cases(.)))) != 0)
base R
r <- rle(complete.cases(df))
str(r, vec.len = 9)
# List of 2
# $ lengths: int [1:9] 2 1 1 1 1 3 1 1 4
# $ values : logi [1:9] FALSE TRUE FALSE TRUE FALSE TRUE FALSE TRUE FALSE
# - attr(*, "class")= chr "rle"
r$values[ -c(1, length(r$values)) ] <- TRUE
str(r, vec.len = 9)
# List of 2
# $ lengths: int [1:9] 2 1 1 1 1 3 1 1 4
# $ values : logi [1:9] FALSE TRUE TRUE TRUE TRUE TRUE TRUE TRUE FALSE
# - attr(*, "class")= chr "rle"
df[inverse.rle(r),]
# var1 var2 var3 var4
# 3 3 3 8 8
# 4 4 NA 9 9
# 5 5 2 10 10
# 6 6 NA 11 11
# 7 7 3 12 12
# 8 8 4 13 13
# 9 9 2 14 14
# 10 10 NA 15 15
# 11 11 4 16 16
dplyr
For your question of efficiency, you can adapt the rle solution to dplyr as well (that should be trivial), but I see no reason why the use of complete.cases and cumany/rev would be a problem. You can improve on your attempt by storing complete.cases(.) in an interim column instead of computing it twice.
library(dplyr)
df %>%
mutate(aux = complete.cases(cur_data())) %>%
filter(cumany(aux) & rev(cumany(rev(aux))))
# var1 var2 var3 var4 aux
# 1 3 3 8 8 TRUE
# 2 4 NA 9 9 FALSE
# 3 5 2 10 10 TRUE
# 4 6 NA 11 11 FALSE
# 5 7 3 12 12 TRUE
# 6 8 4 13 13 TRUE
# 7 9 2 14 14 TRUE
# 8 10 NA 15 15 FALSE
# 9 11 4 16 16 TRUE
data.table
(Just an adaptation of the dplyr version.)
library(data.table)
setDT(df)
df[, aux := complete.cases(.SD)
][ cumsum(aux) > 0 & rev(cumsum(rev(aux)) > 0), ]
# var1 var2 var3 var4 aux
# <int> <num> <int> <int> <lgcl>
# 1: 3 3 8 8 TRUE
# 2: 4 NA 9 9 FALSE
# 3: 5 2 10 10 TRUE
# 4: 6 NA 11 11 FALSE
# 5: 7 3 12 12 TRUE
# 6: 8 4 13 13 TRUE
# 7: 9 2 14 14 TRUE
# 8: 10 NA 15 15 FALSE
# 9: 11 4 16 16 TRUE
I would do
na_count <- rowSums(is.na(df))
df <- df %>%
slice(min(which(na_count==0)):max(which(na_count==0)))
Output
> df
var1 var2 var3 var4
1 3 3 8 8
2 4 NA 9 9
3 5 2 10 10
4 6 NA 11 11
5 7 3 12 12
6 8 4 13 13
7 9 2 14 14
8 10 NA 15 15
9 11 4 16 16
I think we overcomplicated it a bit; the most efficient, I think, is plain base R.
Directly take the indices of all complete cases:
s <- which(complete.cases(df))
We surely cannot subset on s alone, as we want to keep all the "in between" incomplete rows too; we achieve that by simply subsetting from the first up to the last index.
df[first(s):last(s), ]
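Note that first() and last() come from dplyr (or data.table); in strictly base R the same subset can be written with plain indexing:
df[s[1]:s[length(s)], ]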
Continuing the rle love fest:
(which(rle(rowSums(df_NA))$values != 'NA')[1]):dplyr::last(which(rle(rowSums(df_NA))$values != 'NA'))
[1] 3 4 5 6 7 8 9 10 11
or, dispensing with dplyr
(which(rle(rowSums(df_NA))$values != 'NA')[1]):(which(rle(rowSums(df_NA))$values != 'NA'))[[(length(which(rle(rowSums(df_NA))$values != 'NA')))]]
[1] 3 4 5 6 7 8 9 10 11
Another possible solution (thanks, #r2evans, for suggesting complete.cases):
library(dplyr)
df %>%
mutate(aux = !complete.cases(.)) %>%
filter(!cumall(aux)) %>%
arrange(desc(var1)) %>%
filter(!cumall(aux)) %>%
arrange(var1) %>%
select(-aux)
#> var1 var2 var3 var4
#> 1 3 3 8 8
#> 2 4 NA 9 9
#> 3 5 2 10 10
#> 4 6 NA 11 11
#> 5 7 3 12 12
#> 6 8 4 13 13
#> 7 9 2 14 14
#> 8 10 NA 15 15
#> 9 11 4 16 16
Bit late to the party, but base R in a single expression:
df[Reduce(
  function(x, y) seq(from = x, to = y),
  range(which(complete.cases(df)))
), ]
Benchmark
Here, I create a bigger dataset with 1,000,000 rows and 3 variables to determine which method is the fastest. Note: it will take a few seconds to apply the NA values randomly across the 3 columns for the first 100,000 rows and the last 100,000 rows. Essentially, with this example, we want to remove the first 100,000 rows and the last 100,000 rows.
Dataset
set.seed(203)
df <- data.frame(var1 = sample(x = 1:500, size = 1000000, replace = TRUE),
var2 = sample(x = 1:500, size = 1000000, replace = TRUE),
var3 = sample(x = 1:500, size = 1000000, replace = TRUE))
df[1:100000,] <- plyr::ddply(df[1:100000,], c("var1", "var2", "var3"), function(x) {x[sample(x = 1:3, size = 1, replace = TRUE)] <- NA; x})
df[900000:1000000,] <- plyr::ddply(df[900000:1000000,], c("var1", "var2", "var3"), function(x) {x[sample(x = 1:3, size = 1, replace = TRUE)] <- NA; x})
df[300000:400000,2] <- NA
Output
It looks like @MerijnvanTilborg's data.table solution is the fastest, followed by @r2evans' data.table version, on this sample dataset.
Code
library(tidyverse)
library(data.table)
df1 <- df
dt1 <- as.data.table(df)
dt2 <- as.data.table(df)
bm <- microbenchmark::microbenchmark(baseR_r2evans = {r <- rle(complete.cases(df1));
r$values[ -c(1, length(r$values)) ] <- TRUE; df[inverse.rle(r),]},
dplyr_r2evans = {df %>%
dplyr::mutate(aux = complete.cases(cur_data())) %>%
dplyr::filter(cumany(aux) & rev(cumany(rev(aux))))},
datatable_r2evans = {dt1[, aux := complete.cases(.SD)
][ cumsum(aux) > 0 & rev(cumsum(rev(aux)) > 0), ]},
valkyr = {na_count <- rowSums(is.na(df)); df %>%
dplyr::slice(min(which(na_count==0)):max(which(na_count==0)))},
PaulS = {df %>%
dplyr::mutate(aux = !complete.cases(.)) %>%
dplyr::filter(!cumall(aux)) %>%
dplyr::arrange(desc(var1)) %>%
dplyr::filter(!cumall(aux)) %>%
dplyr::arrange(var1) %>%
dplyr::select(-aux)},
Chris = {df[(which(rle(rowSums(df))$values != 'NA')[1]):(which(rle(rowSums(df))$values != 'NA'))[[(length(which(rle(rowSums(df))$values != 'NA')))]],]},
AndrewGB = {df %>%
dplyr::filter(cumsum(complete.cases(.)) != 0 &
rev(cumsum(rev(complete.cases(.)))) != 0)},
Merijn_baseR = {s <- which(complete.cases(df));
df[first(s):last(s), ]},
Merijn_datatable = {dt2[, aux := complete.cases(.SD)][first(which(aux)):last(which(aux))]},
times = 1000
)
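The benchmark object can then be printed or plotted; a quick sketch (autoplot needs ggplot2 installed):
print(bm)
# ggplot2::autoplot(bm)  # violin plot of the timing distributions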

How to merge two dataframes with replacement/creation of rows depending on existence in first df?

I have two dataframes df1 and df2, I am looking for the simplest operation to get df3.
I want to replace rows in df1 with rows from df2 when id matches (so rbind.fill is not a solution), and append rows from df2 whose id does not exist in df1, but only for the columns that exist in df2.
I guess I could use several joins and anti-joins and then merge, but I wonder if there already exists a function for that operation.
df1 <- data.frame(id = 1:5, c1 = 11:15, c2 = 16:20, c3 = 21:25)
df2 <- data.frame(id = 4:7, c1 = 1:4, c2 = 5:8)
df1
id c1 c2 c3
1 11 16 21
2 12 17 22
3 13 18 23
4 14 19 24
5 15 20 25
df2
id c1 c2
4 1 5
5 2 6
6 3 7
7 4 8
df3
id c1 c2 c3
1 11 16 21
2 12 17 22
3 13 18 23
4 1 5 24
5 2 6 25
6 3 7 NULL
7 4 8 NULL
We can use {powerjoin}: make a full join and deal with the conflicts using coalesce_xy (which is really dplyr::coalesce):
library(powerjoin)
df1 <- data.frame(id = 1:5, c1 = 11:15, c2 = 16:20, c3 = 21:25)
df2 <- data.frame(id = 4:7, c1 = 1:4, c2 = 5:8)
safe_full_join(df1, df2, by= "id", conflict = coalesce_xy)
# id c1 c2 c3
# 1 1 11 16 21
# 2 2 12 17 22
# 3 3 13 18 23
# 4 4 14 19 24
# 5 5 15 20 25
# 6 6 3 7 NA
# 7 7 4 8 NA
I ended up with:
special_combine <- function(df1, df2){
  df1_int <- df1[, colnames(df1) %in% colnames(df2)]
  df1_ext <- df1[, c("id", colnames(df1)[!colnames(df1) %in% colnames(df2)])]
  df3 <- bind_rows(df1_int, df2)
  df3 <- df3[!duplicated(df3$id, fromLast = TRUE), ] %>%
    dplyr::left_join(df1_ext, by = "id") %>%
    dplyr::arrange(id)
  df3
}
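Calling it on the example data should reproduce df3, with NA rather than NULL for the missing c3 values; a usage sketch, assuming dplyr is attached:
library(dplyr)
special_combine(df1, df2)
#   id c1 c2 c3
# 1  1 11 16 21
# 2  2 12 17 22
# 3  3 13 18 23
# 4  4  1  5 24
# 5  5  2  6 25
# 6  6  3  7 NA
# 7  7  4  8 NA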

Aggregate dataframe in rolling blocks of 3 rows

I have the following data frame as an example
df <- data.frame(score=letters[1:15], total1=1:15, total2=16:30)
> df
score total1 total2
1 a 1 16
2 b 2 17
3 c 3 18
4 d 4 19
5 e 5 20
6 f 6 21
7 g 7 22
8 h 8 23
9 i 9 24
10 j 10 25
11 k 11 26
12 l 12 27
13 m 13 28
14 n 14 29
15 o 15 30
I would like to aggregate my data frame by summing within consecutive blocks of three rows (the score values are all different, so there is no existing grouping column), i.e.
groups sum1 sum2
'a-b-c' 6 51
'c-d-e' 21 60
etc
All the given answers to this kind of question assume that the grouping strings repeat across rows. The usual aggregate call that I use to obtain the summary delivers a different result:
aggregate(df$total1, by=list(sum1=df$score %in% c('a','b','c'), sum2=df$score %in% c('d','e','f')), FUN=sum)
sum1 sum2 x
1 FALSE FALSE 99
2 TRUE FALSE 6
3 FALSE TRUE 15
If you want a tidyverse solution, here is one possibility:
df <- data.frame(score=letters[1:15], total1=1:15, total2=16:30)
df %>%
mutate(groups = case_when(
score %in% c("a","b","c") ~ "a-b-c",
score %in% c("d","e","f") ~ "d-e-f"
)) %>%
group_by(groups) %>%
summarise_if(is.numeric, sum)
returns
# A tibble: 3 x 3
groups total1 total2
<chr> <int> <int>
1 a-b-c 6 51
2 d-e-f 15 60
3 <NA> 99 234
Add a "groups" column with the category value.
df$groups = NA
and then define each group like this:
df$groups[df$score=="a" | df$score=="b" | df$score=="c" ] = "a-b-c"
Finally aggregate by that column.
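Filling in the remaining groups and the final aggregation, as a sketch of this approach:
df$groups[df$score %in% c("d", "e", "f")] <- "d-e-f"
# ... and so on for the remaining blocks of three
aggregate(cbind(total1, total2) ~ groups, data = df, FUN = sum)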
Here's a solution that works for any sized data frame.
df <- data.frame(score=letters[1:15], total1=1:15, total2=16:30)
# I'm adding a row to demonstrate that the grouping pattern works when the
# number of rows is not equally divisible by 3.
df <- rbind(df, data.frame(score = letters[16], total1 = 16, total2 = 31))
# A vector that represents the correct groupings for the data frame.
groups <- c(rep(1:floor(nrow(df) / 3), each = 3),
rep(floor(nrow(df) / 3) + 1, nrow(df) - length(1:(nrow(df) / 3)) * 3))
# Your method of aggregation by `groups`. I'm going to use `data.table`.
require(data.table)
dt <- as.data.table(df)
dt[, group := groups]
aggDT <- dt[, list(score = paste0(score, collapse = "-"),
total1 = sum(total1), total2 = sum(total2)), by = group][
, group := NULL]
aggDT
score total1 total2
1: a-b-c 6 51
2: d-e-f 15 60
3: g-h-i 24 69
4: j-k-l 33 78
5: m-n-o 42 87
6: p 16 31
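As an aside, the same grouping vector can be built more compactly with ceiling(), which also handles a trailing partial block:
groups <- ceiling(seq_len(nrow(df)) / 3)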

Assign specific value based on two columns in R

I have a dataframe which looks like:
Student_ID Number Position
VB-123 10 2
VB-456 15 5
VB-789 25 25
VB-889 12 2
VB-965 15 7
VB-758 45 9
VB-245 25 25
I want to add new column and assign a value based on below conditions:
If only Number is duplicate in entire dataframe then Assign A
If only Position is duplicate in entire dataframe then assign B
If both Number and Position are duplicate then assign C
If none of the duplicate then assign D.
Output would look like:
Student_ID Number Position Assign
VB-123 10 2 B
VB-456 15 5 A
VB-789 25 25 C
VB-889 12 2 B
VB-965 15 7 A
VB-758 45 9 D
VB-245 25 25 C
With dplyr,
library(dplyr)
students <- data.frame(Student_ID = c("VB-123", "VB-456", "VB-789", "VB-889", "VB-965", "VB-758", "VB-245"),
                       Number = c(10L, 15L, 25L, 12L, 15L, 45L, 25L),
                       Position = c(2L, 5L, 25L, 2L, 7L, 9L, 25L))
students2 <- students %>%
  mutate_at(vars(Number, Position), funs(n = table(.)[as.character(.)])) %>%
  mutate(Assign = case_when(Number_n > 1 & Position_n > 1 ~ 'C',
                            Number_n > 1 ~ 'A',
                            Position_n > 1 ~ 'B',
                            TRUE ~ 'D'))
students2
#> Student_ID Number Position Number_n Position_n Assign
#> 1 VB-123 10 2 1 2 B
#> 2 VB-456 15 5 2 1 A
#> 3 VB-789 25 25 2 2 C
#> 4 VB-889 12 2 1 2 B
#> 5 VB-965 15 7 2 1 A
#> 6 VB-758 45 9 1 1 D
#> 7 VB-245 25 25 2 2 C
As an alternative to the mutate_at line, you could use add_count twice, renaming as necessary. To remove the intermediary columns, tack on select(-matches('_n$')).
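A sketch of that add_count alternative (the name argument assumes a reasonably recent dplyr):
library(dplyr)
students %>%
  add_count(Number, name = "Number_n") %>%
  add_count(Position, name = "Position_n") %>%
  mutate(Assign = case_when(
    Number_n > 1 & Position_n > 1 ~ "C",
    Number_n > 1 ~ "A",
    Position_n > 1 ~ "B",
    TRUE ~ "D"
  )) %>%
  select(-matches("_n$"))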
You can more or less replicate the logic in base R by assigning to subsets:
students2 <- cbind(students, lapply(students[2:3], function(x) table(x)[as.character(x)]))
students2$Assign <- 'D'
students2$Assign[students2$Number.Freq > 1 & students2$Position.Freq > 1] <- 'C'
students2$Assign[students2$Number.Freq > 1 & students2$Position.Freq == 1] <- 'A'
students2$Assign[students2$Number.Freq == 1 & students2$Position.Freq > 1] <- 'B'
students2[4:7] <- NULL
students2
#> Student_ID Number Position Assign
#> 1 VB-123 10 2 B
#> 2 VB-456 15 5 A
#> 3 VB-789 25 25 C
#> 4 VB-889 12 2 B
#> 5 VB-965 15 7 A
#> 6 VB-758 45 9 D
#> 7 VB-245 25 25 C
Here is an option using base R. Create a list of column names in the order of evaluation ('l1'), pre-assign 'D' to create the 'Assign' column in 'dat', then loop through the sequence of 'l1': subset the columns of the data based on the column names in 'l1', use duplicated to find the duplicated elements, and reassign the 'Assign' column to the corresponding letter.
l1 <- list("Number", "Position", c("Number", "Position"))
dat$Assign <- rep("D", nrow(dat))
for(i in seq_along(l1)) {
  df <- dat[l1[[i]]]
  i1 <- duplicated(df) | duplicated(df, fromLast = TRUE)
  dat$Assign <- replace(dat$Assign, i1, LETTERS[i])
}
-output
dat
# Student_ID Number Position Assign
#1 VB-123 10 2 B
#2 VB-456 15 5 A
#3 VB-789 25 25 C
#4 VB-889 12 2 B
#5 VB-965 15 7 A
#6 VB-758 45 9 D
#7 VB-245 25 25 C
A solution using dplyr.
library(dplyr)
dat2 <- dat %>% count(Number)
dat3 <- dat %>% count(Position)
dat4 <- dat %>% count(Number, Position)
dat5 <- dat %>%
  left_join(dat2, by = "Number") %>%
  left_join(dat3, by = "Position") %>%
  left_join(dat4, by = c("Number", "Position")) %>%
  mutate(Assign = case_when(
    n > 1 ~ "C",
    n.x > 1 & n.y == 1 ~ "A",
    n.y > 1 & n.x == 1 ~ "B",
    TRUE ~ "D"
  )) %>%
  select(-n.x, -n.y, -n)
dat5
# Student_ID Number Position Assign
# 1 VB-123 10 2 B
# 2 VB-456 15 5 A
# 3 VB-789 25 25 C
# 4 VB-889 12 2 B
# 5 VB-965 15 7 A
# 6 VB-758 45 9 D
# 7 VB-245 25 25 C
DATA
dat <- read.table(text = "Student_ID Number Position
'VB-123' 10 2
'VB-456' 15 5
'VB-789' 25 25
'VB-889' 12 2
'VB-965' 15 7
'VB-758' 45 9
'VB-245' 25 25",
header = TRUE, stringsAsFactors = FALSE)
