I have a data frame loaded in R and I need to sum one row. The problem is that I've tried to use the rowSums() function, but 2 columns are not numeric (one is the character column "Nazwa" and one is the logical column "X" at the end of the data frame). Is there any option to sum this row without those two columns? I'd like to start from row 1, column 3 and not include the last column.
My data:
structure(list(Kod = c(0L, 200000L, 400000L, 600000L, 800000L,
1000000L), Nazwa = c("POLSKA", "DOLNOŚLĄSKIE", "KUJAWSKO-POMORSKIE",
"LUBELSKIE", "LUBUSKIE", "ŁÓDZKIE"), gospodarstwa.ogółem.gospodarstwa.2006.... = c(9187L,
481L, 173L, 1072L, 256L, 218L), gospodarstwa.ogółem.gospodarstwa.2007.... = c(11870L,
652L, 217L, 1402L, 361L, 261L), gospodarstwa.ogółem.gospodarstwa.2008.... = c(14896L,
879L, 258L, 1566L, 480L, 314L), gospodarstwa.ogółem.gospodarstwa.2009.... = c(17091L,
1021L, 279L, 1710L, 579L, 366L), gospodarstwa.ogółem.gospodarstwa.2010.... = c(20582L,
1227L, 327L, 1962L, 833L, 420L), gospodarstwa.ogółem.gospodarstwa.2011.... = c(23449L,
1322L, 371L, 2065L, 1081L, 478L), gospodarstwa.ogółem.gospodarstwa.2012.... = c(25944L,
1312L, 390L, 2174L, 1356L, 518L), gospodarstwa.ogółem.gospodarstwa.2013.... = c(26598L,
1189L, 415L, 2129L, 1422L, 528L), gospodarstwa.ogółem.gospodarstwa.2014.... = c(24829L,
1046L, 401L, 1975L, 1370L, 508L), gospodarstwa.ogółem.gospodarstwa.2015.... = c(22277L,
849L, 363L, 1825L, 1202L, 478L), gospodarstwa.ogółem.gospodarstwa.2016.... = c(22435L,
813L, 470L, 1980L, 1148L, 497L), gospodarstwa.ogółem.gospodarstwa.2017.... = c(20257L,
741L, 419L, 1904L, 948L, 477L), gospodarstwa.ogółem.gospodarstwa.2018.... = c(19207L,
713L, 395L, 1948L, 877L, 491L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2006..ha. = c(228038L,
19332L, 4846L, 19957L, 12094L, 3378L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2007..ha. = c(287529L,
21988L, 5884L, 23934L, 18201L, 3561L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2008..ha. = c(314848L,
28467L, 5943L, 26892L, 18207L, 4829L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2009..ha. = c(367062L,
26427L, 6826L, 30113L, 22929L, 5270L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2010..ha. = c(519069L,
39703L, 7688L, 34855L, 35797L, 7671L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2011..ha. = c(605520L,
45547L, 8376L, 34837L, 44259L, 8746L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2012..ha. = c(661688L,
44304L, 8813L, 37466L, 52581L, 9908L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2013..ha. = c(669970L,
37455L, 11152L, 40819L, 54692L, 10342L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2014..ha. = c(657902L,
37005L, 11573L, 38467L, 53300L, 11229L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2015..ha. = c(580730L,
31261L, 10645L, 34052L, 46343L, 10158L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2016..ha. = c(536579L,
29200L, 9263L, 31343L, 43235L, 9986L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2017..ha. = c(494978L,
27542L, 8331L, 29001L, 37923L, 9260L), gospodarstwa.ogółem.powierzchnia.użytków.rolnych.2018..ha. = c(484677L,
27357L, 7655L, 28428L, 37174L, 8905L), X = c(NA, NA, NA, NA,
NA, NA)), row.names = c(NA, 6L), class = "data.frame")
My attempt:
rowSums(dane_csv[, 3:length(dane_csv$Nazwa=='POLSKA')])
Using base R
rowSums(dane_csv[sapply(dane_csv, is.numeric)])
Output:
1 2 3 4 5 6
6667212 627833 511473 1033876 1288648 1108797
Or with dplyr
library(dplyr)
dane_csv %>%
  transmute(out = rowSums(across(where(is.numeric))))
In base R, you can use the Filter function to select the numeric columns and then call rowSums on them:
rowSums(Filter(is.numeric, df))
1 2 3 4 5 6
6667212 627833 511473 1033876 1288648 1108797
You can select only the numeric columns:
library(dplyr)
df %>%
  select(where(is.numeric)) %>%
  rowSums() %>%
  first()
Result:
1
6667212
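If you want to follow the literal column positions described in the question (start at column 3 and stop before the last column), here is a minimal index-based sketch; it assumes the data frame is called dane_csv, as in the attempt above:
# row 1, columns 3 through the second-to-last column
# (skips "Kod" and "Nazwa" at the start and the logical "X" at the end)
rowSums(dane_csv[1, 3:(ncol(dane_csv) - 1)])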
set.seed(500)
index <- sample(1:nrow(Bands_reflectance_2017),100, replace = FALSE )
Bands_reflectance_2017 <- dput(head(Bands_reflectance_2017[1:100]))
Bands_reflectance_2017 <-
structure(
list(
t2017.01.05T08.25.12.000000000_blue = c(5064L,
5096L, 5072L, 5048L, 5048L, 5064L),
t2017.01.15T08.26.22.000000000_blue = c(418L,
487L, 480L, 449L, 449L, 480L),
t2017.01.25T08.21.38.000000000_blue = c(312L,
414L, 385L, 385L, 385L, 403L),
t2017.02.04T08.27.09.000000000_blue = c(5156L,
5096L, 5204L, 5240L, 5240L, 5112L),
t2017.02.14T08.27.29.000000000_blue = c(2554L,
2896L, 2842L, 2776L, 2776L, 2934L),
t2017.02.24T08.23.38.000000000_blue = c(2662L,
2428L, 2630L, 2644L, 2644L, 2276L),
t2017.03.06T08.24.47.000000000_blue = c(340L,
403L, 409L, 407L, 407L, 391L),
t2017.03.16T08.16.07.000000000_blue = c(188L,
245L, 257L, 239L, 239L, 245L),
t2017.03.26T08.22.43.000000000_blue = c(379L,
397L, 381L, 345L, 345L, 387L),
t2017.04.05T08.23.06.000000000_blue = c(604L,
647L, 639L, 647L, 647L, 631L),
t2017.04.15T08.23.45.000000000_blue = c(311L,
382L, 376L, 379L, 379L, 425L),
t2017.04.25T08.23.17.000000000_blue = c(219L,
318L, 237L, 322L, 322L, 302L),
t2017.05.05T08.23.45.000000000_blue = c(979L,
1030L, 1021L, 1030L, 1030L, 985L),
t2017.05.15T08.28.11.000000000_blue = c(138L,
219L, 196L, 201L, 201L, 247L),
t2017.05.25T08.23.46.000000000_blue = c(655L,
779L, 736L, 752L, 752L, 777L),
t2017.06.04T08.25.50.000000000_blue = c(318L,
419L, 384L, 343L, 343L, 400L),
t2017.06.14T08.28.06.000000000_blue = c(397L,
387L, 407L, 432L, 432L, 347L),
t2017.06.24T08.26.00.000000000_blue = c(336L,
450L, 402L, 395L, 395L, 388L),
t2017.07.04T08.23.42.000000000_blue = c(502L,
538L, 512L, 495L, 495L, 505L),
t2017.07.09T08.23.09.000000000_blue = c(568L,
597L, 639L, 611L, 611L, 577L),
t2017.07.19T08.23.43.000000000_blue = c(479L,
517L, 536L, 529L, 529L, 528L),
t2017.07.24T08.23.44.000000000_blue = c(409L,
499L, 499L, 473L, 473L, 482L),
t2017.07.29T08.26.12.000000000_blue = c(781L,
801L, 810L, 823L, 823L, 735L),
t2017.08.03T08.26.43.000000000_blue = c(517L,
579L, 560L, 583L, 583L, 564L),
t2017.08.08T08.23.41.000000000_blue = c(575L,
654L, 650L, 650L, 650L, 602L),
t2017.08.13T08.23.44.000000000_blue = c(623L,
679L, 708L, 698L, 698L, 677L),
t2017.08.18T08.25.16.000000000_blue = c(614L,
651L, 648L, 597L, 597L, 651L),
t2017.08.23T08.22.22.000000000_blue = c(554L,
613L, 559L, 524L, 524L, 596L),
t2017.08.28T08.28.01.000000000_blue = c(769L,
814L, 772L, 744L, 744L, 828L),
t2017.09.02T08.23.42.000000000_blue = c(756L,
761L, 763L, 783L, 783L, 742L),
t2017.09.07T08.23.30.000000000_blue = c(807L,
865L, 826L, 838L, 838L, 837L),
t2017.09.12T08.23.35.000000000_blue = c(861L,
869L, 876L, 904L, 904L, 869L),
t2017.09.22T08.23.38.000000000_blue = c(4640L,
3780L, 4340L, 4728L, 4728L, 3060L),
t2017.09.27T08.16.41.000000000_blue = c(778L,
777L, 811L, 839L, 839L, 752L),
t2017.10.02T08.17.41.000000000_blue = c(766L,
868L, 851L, 857L, 857L, 799L),
t2017.10.07T08.24.51.000000000_blue = c(767L,
816L, 839L, 830L, 830L, 753L),
t2017.10.12T08.24.39.000000000_blue = c(678L,
688L, 706L, 750L, 750L, 627L),
t2017.10.17T08.15.32.000000000_blue = c(678L,
769L, 804L, 797L, 797L, 711L),
t2017.10.22T08.21.34.000000000_blue = c(3146L,
3134L, 3128L, 3160L, 3160L, 3118L),
t2017.10.27T08.23.27.000000000_blue = c(612L,
697L, 721L, 697L, 697L, 708L),
t2017.11.01T08.24.41.000000000_blue = c(941L,
982L, 1001L, 1010L, 1010L, 999L),
t2017.11.06T08.20.50.000000000_blue = c(670L,
824L, 836L, 824L, 824L, 785L),
t2017.11.11T08.27.40.000000000_blue = c(720L,
817L, 839L, 807L, 807L, 801L),
t2017.11.16T08.16.16.000000000_blue = c(9824L,
9744L, 9792L, 9744L, 9744L, 9536L),
t2017.11.21T08.17.00.000000000_blue = c(749L,
841L, 838L, 738L, 738L, 830L),
t2017.11.26T08.25.13.000000000_blue = c(735L,
863L, 832L, 713L, 713L, 899L),
t2017.12.01T08.20.22.000000000_blue = c(674L,
836L, 816L, 800L, 800L, 771L),
t2017.12.06T08.19.42.000000000_blue = c(2742L,
2770L, 2742L, 2762L, 2762L, 2798L),
t2017.12.11T08.19.00.000000000_blue = c(582L,
745L, 734L, 654L, 654L, 743L),
t2017.12.16T08.23.19.000000000_blue = c(926L,
1054L, 1001L, 946L, 946L, 1054L),
t2017.12.21T08.20.53.000000000_blue = c(7432L,
7484L, 7456L, 7404L, 7404L, 7484L),
t2017.12.26T08.20.39.000000000_blue = c(629L,
724L, 762L, 738L, 738L, 731L),
t2017.12.31T08.20.04.000000000_blue = c(667L,
765L, 762L, 718L, 718L, 765L),
t2017.01.05T08.25.12.000000000_green = c(5224L,
5196L, 5208L, 5152L, 5152L, 5172L),
t2017.01.15T08.26.22.000000000_green = c(837L,
938L, 907L, 858L, 858L, 927L),
t2017.01.25T08.21.38.000000000_green = c(735L,
808L, 770L, 770L, 770L, 836L),
t2017.02.04T08.27.09.000000000_green = c(5424L,
5492L, 5488L, 5536L, 5536L, 5832L),
t2017.02.14T08.27.29.000000000_green = c(3050L,
3094L, 3108L, 3228L, 3228L, 2900L),
t2017.02.24T08.23.38.000000000_green = c(2664L,
2450L, 2598L, 2646L, 2646L, 2340L),
t2017.03.06T08.24.47.000000000_green = c(702L,
735L, 749L, 727L, 727L, 729L),
t2017.03.16T08.16.07.000000000_green = c(632L,
685L, 708L, 685L, 685L, 703L),
t2017.03.26T08.22.43.000000000_green = c(744L,
841L, 806L, 809L, 809L, 818L),
t2017.04.05T08.23.06.000000000_green = c(1030L,
1036L, 1044L, 1050L, 1050L, 1040L),
t2017.04.15T08.23.45.000000000_green = c(634L,
720L, 708L, 699L, 699L, 751L),
t2017.04.25T08.23.17.000000000_green = c(619L,
698L, 716L, 723L, 723L, 687L),
t2017.05.05T08.23.45.000000000_green = c(1340L,
1368L, 1374L, 1404L, 1404L, 1354L),
t2017.05.15T08.28.11.000000000_green = c(525L,
633L, 619L, 612L, 612L, 626L),
t2017.05.25T08.23.46.000000000_green = c(1042L,
1118L, 1078L, 1028L, 1028L, 1148L),
t2017.06.04T08.25.50.000000000_green = c(655L,
778L, 783L, 769L, 769L, 813L),
t2017.06.14T08.28.06.000000000_green = c(772L,
829L, 838L, 810L, 810L, 822L),
t2017.06.24T08.26.00.000000000_green = c(741L,
888L, 848L, 798L, 798L, 865L),
t2017.07.04T08.23.42.000000000_green = c(867L,
918L, 912L, 846L, 846L, 946L),
t2017.07.09T08.23.09.000000000_green = c(936L,
1001L, 1012L, 972L, 972L, 985L),
t2017.07.19T08.23.43.000000000_green = c(848L,
911L, 925L, 915L, 915L, 903L),
t2017.07.24T08.23.44.000000000_green = c(855L,
907L, 947L, 913L, 913L, 937L),
t2017.07.29T08.26.12.000000000_green = c(1096L,
1106L, 1134L, 1150L, 1150L, 1116L),
t2017.08.03T08.26.43.000000000_green = c(987L,
1072L, 1040L, 1030L, 1030L, 1021L),
t2017.08.08T08.23.41.000000000_green = c(996L,
1011L, 1001L, 1011L, 1011L, 1032L),
t2017.08.13T08.23.44.000000000_green = c(1006L,
1100L, 1082L, 1078L, 1078L, 1092L),
t2017.08.18T08.25.16.000000000_green = c(977L,
1034L, 1032L, 976L, 976L, 1020L),
t2017.08.23T08.22.22.000000000_green = c(976L,
1054L, 1044L, 985L, 985L, 1072L),
t2017.08.28T08.28.01.000000000_green = c(1162L,
1176L, 1188L, 1150L, 1150L, 1200L),
t2017.09.02T08.23.42.000000000_green = c(1136L,
1152L, 1158L, 1176L, 1176L, 1130L),
t2017.09.07T08.23.30.000000000_green = c(1122L,
1166L, 1174L, 1194L, 1194L, 1162L),
t2017.09.12T08.23.35.000000000_green = c(1158L,
1170L, 1168L, 1180L, 1180L, 1146L),
t2017.09.22T08.23.38.000000000_green = c(3304L,
3218L, 3072L, 3580L, 3580L, 4148L),
t2017.09.27T08.16.41.000000000_green = c(1172L,
1228L, 1242L, 1224L, 1224L, 1172L),
t2017.10.02T08.17.41.000000000_green = c(1148L,
1224L, 1220L, 1200L, 1200L, 1164L),
t2017.10.07T08.24.51.000000000_green = c(1120L,
1164L, 1160L, 1148L, 1148L, 1114L),
t2017.10.12T08.24.39.000000000_green = c(1124L,
1158L, 1166L, 1144L, 1144L, 1090L),
t2017.10.17T08.15.32.000000000_green = c(1092L,
1190L, 1180L, 1154L, 1154L, 1146L),
t2017.10.22T08.21.34.000000000_green = c(3140L,
3124L, 3142L, 3134L, 3134L, 3096L),
t2017.10.27T08.23.27.000000000_green = c(1064L,
1104L, 1116L, 1078L, 1078L, 1098L),
t2017.11.01T08.24.41.000000000_green = c(1298L,
1310L, 1344L, 1344L, 1344L, 1318L),
t2017.11.06T08.20.50.000000000_green = c(1114L,
1240L, 1220L, 1164L, 1164L, 1212L),
t2017.11.11T08.27.40.000000000_green = c(1182L,1278L, 1278L, 1192L, 1192L, 1284L),
t2017.11.16T08.16.16.000000000_green = c(8872L, 8728L, 8816L, 8904L, 8904L, 8600L),
t2017.11.21T08.17.00.000000000_green = c(1166L, 1268L, 1250L, 1158L, 1158L, 1260L),
t2017.11.26T08.25.13.000000000_green = c(1138L, 1272L, 1288L, 1240L, 1240L, 1278L)), row.names = c(NA, 6L), class = "data.frame")
I have a dataframe of dates per specific band, with 534 column headers such as the following:
"t2017-12-31T08:20:04.000000000_red_edge_3"
"t2017-02-04T08:27:09.000000000_nir_1"
"t2017-12-31T08:20:04.000000000_swir_2"
Now, I want to remove everything and keep only the date and the band name; e.g. for column headers one and two, I want to be left with
"2017-12-31_red_edge_3"
"2017-02-04_nir_1"
I have about 534 columns and the characters are not consistent, because each date-time is different and there are more band names than the examples shown here across all 534 records. I was only able to remove the repetitive characters such as "T08", ":", "t" and "000000000", which appear in all the columns. How do I remove the characters between the date and the band name when they vary per column, given that I cannot just use:
for ( col in 1:ncol(Bands_reflectance_2017[5:534])){
colnames(Bands_reflectance_2017)[5:534] <- sub(".000000000", "", colnames(Bands_reflectance_2017)[5:534]) #Remove .000000000
}
etc
Also, at the end of the day, I want to replace each band name with a band code, for example assigning "nir_1" the code "B8", and replace the month number "12" with "December", so that for example my first and second column headers read:
B7_December31
B8_February02
"B7_December31", "B8_February02" which are better naming to run in a random forest. Because I am running into problems of
Error in eval(predvars, data, env) : object '"t2017-12-31T08:20:04.000000000_red_edge_3"' not found
if I keep the naming convention in the example
I have the following column header names in my dataframe (Bands_reflectance_2017) of 534 columns:
"t2017-01-25T08:21:38.000000000_blue"
"t2017-08-23T08:22:22.000000000_green"
I want to remove everything except the date and the band name, e.g. "2017_01_25_blue".
I tried:
for (col in 1:ncol(Bands_reflectance_2017[5:534])) {
colnames(Bands_reflectance_2017)[5:534] <- sub("T08", "", colnames(Bands_reflectance_2017)[5:534]) # Remove T08
}
But as some of the characters I want to remove are unique to each of the 534 columns, I am not sure how to remove them.
I expect this at the end of the day:
2017_01_25_blue
2017_08_23_green
The latter:
"B2_December31", "B3_August23"
I also tried this:
substr(colnames(Bands_reflectance_2017[2:335]), 2, 11)
What is the best way to do it? I am fairly new to programming and to R.
Thanks for sharing your code and data. Most people won't download random files. In the future you can share data with dput(data) or a smaller version with dput(head(data)).
library(stringr)
library(lubridate)
# Using the data frame that you provided with dput, which I call "df1" here
# You'll probably have to adjust the numbers between the [] because your
# data frame is vastly different from what I have and I'm not sure I have
# the right number, but since you said 534 columns, I'm using that.
# Keep the original column names in a separate vector so df1 stays a data frame
col_names <- names(df1)[1:534]
band_names <- rep(NA, length(col_names))
# This is messy. I'm sure someone who knows stringr or
# regex better has a neater way to do this.
# str_locate will find positions in a string and return the numeric value of the position
# str_sub uses positions to pull substrings
# gsub replaces patterns
# What this does is find the positions of the dates or labels,
# pulls out the substring, replaces things not needed
# (like "-" I used to mark positions), changed the number for date
# to something numeric so that month() can be switched from number to text.
for(i in seq_along(col_names)) {
band_names[i] <- paste0(as.character(month(as.numeric(gsub("\\.","",
str_sub(col_names[i],str_locate(col_names[i],"\\.[0-9]{2}")))),
label=T, abbr = F)),gsub("T","",str_sub(col_names[i],str_locate(col_names[i],
"\\.[0-9]{2}T"))),"_",
str_sub(col_names[i],str_locate(col_names[i],"[a-z]{3,}.+")))}
# You can look at the results
band_names
[1] "Dec-12_red_edge_3" "Feb-02_nir_1" "Dec-12_swir_2"
# Split up band_names to replace the band label with number
band_out <- str_sub(band_names, 8)      # the band label, e.g. "red_edge_3"
band_stay <- str_sub(band_names, 1, 7)  # the "Mon-dd_" prefix
# Made data frame up for the few example lines. I'm not downloading the CSV and I'm not going to find out the actual band names, labels, and numbers.
fake_bands <- data.frame(label = c("red_edge_3", "nir_1", "swir_2"), number = c("b1","b3","b2"))
# Change out labels for the numbers
band_replace <- fake_bands[match(band_out, fake_bands$label), "number"]
new_names <- paste0(band_stay, band_replace)
new_names
[1] "Dec-12_b1" "Feb-02_b3" "Dec-12_b2"
# Again, you might have to adjust the numbers in []
names(df1)[1:534] <- new_names
You're going to have to expand/replace the fake_bands data frame I made here with a data frame that has two columns. One column should have the labels, like "red_edge_3", and the other should have the appropriate band number.
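If you prefer a single regular expression instead of the position-based approach above, here is a minimal sketch. It assumes every header has the shape "t<year>-<month>-<day>T<time>_<band>" (with either the "-"/":" separators from the question or the "." that dput produces), and it keeps only the date and the band name in the "2017_01_25_blue" style asked for:
old_names <- c("t2017-01-25T08:21:38.000000000_blue",
               "t2017.08.23T08.22.22.000000000_green")
# capture year, month and day, drop the time stamp between "T" and "_", keep the band name
new_names <- sub("^t([0-9]{4})[.-]([0-9]{2})[.-]([0-9]{2})T[^_]*_(.*)$",
                 "\\1_\\2_\\3_\\4", old_names)
new_names
# [1] "2017_01_25_blue"  "2017_08_23_green"
These cleaned names can then be fed into the band-label lookup above (the fake_bands match) in the same way.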
This is the table that has the information; it is much larger, but I have included just a small part of it.
Part 1 (solved): This might seem quite easy, but I am very stuck on writing code to make a graph showing the change over time. The table shows infant mortality, and I just need a graph showing how the infant mortality of one country has changed over time.
Part 2: How can I fit a line to the data, for example for just one country? I need to model the data as a linear response. How can I do this?
my_data <- structure(list(`1800` = c("Afghanistan", "Albania", "Algeria",
"Andorra", "Angola", "Antigua and Barbuda", "Argentina", "Armenia",
"Australia", "Austria"), `1801` = c(469L, 375L, 460L, NA, 486L,
474L, 402L, 371L, 391L, 387L), `1802` = c(469L, 375L, 460L, NA,
486L, 470L, 402L, 371L, 391L, 373L), `1803` = c(469L, 375L, 460L,
NA, 486L, 466L, 402L, 370L, 391L, 359L), `1804` = c(469L, 375L,
460L, NA, 486L, 462L, 402L, 370L, 391L, 346L), `1805` = c(469L,
375L, 460L, NA, 486L, 458L, 402L, 369L, 391L, 333L), `1806` = c(469L,
375L, 460L, NA, 486L, 455L, 402L, 369L, 391L, 321L), `1807` = c(470L,
375L, 460L, NA, 486L, 451L, 402L, 368L, 391L, 309L), `1808` = c(470L,
375L, 460L, NA, 486L, 447L, 402L, 368L, 391L, 325L), `1809` = c(470L,
375L, 460L, NA, 486L, 444L, 402L, 367L, 391L, 316L)), row.names = c(NA,
10L), class = "data.frame")
You can plot what the question asks for with the ggplot2 package.
This type of problem generally comes down to reshaping the data: it needs to be in long format, but it is currently in wide format. See this post on how to reshape data from wide to long format.
library(dplyr)
library(tidyr)
library(ggplot2)
my_data %>%
  rename(country = `1800`) %>%
  pivot_longer(
    cols = starts_with('18'),
    names_to = 'time',
    values_to = 'mortality'
  ) %>%
  mutate(time = as.numeric(time)) %>%
  ggplot(aes(time, mortality, color = country)) +
  geom_point() +
  geom_line()
We can use matplot from base R
matplot(t('row.names<-'(as.matrix(my_data[-1]), my_data[[1]])),
type = 'l', xaxt = 'n')
legend("top", my_data[[1]], col = seq_along(my_data[[1]]),
fill = seq_along(my_data[[1]]))
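Part 2 of the question (fitting a line to the data for a single country) is not covered above. A minimal sketch, reusing the long-format data built in the dplyr answer and arbitrarily picking "Afghanistan" as the example country:
library(dplyr)
library(tidyr)
library(ggplot2)

long_data <- my_data %>%
  rename(country = `1800`) %>%
  pivot_longer(cols = starts_with('18'),
               names_to = 'time',
               values_to = 'mortality') %>%
  mutate(time = as.numeric(time))

one_country <- filter(long_data, country == "Afghanistan")

# linear model of mortality as a function of year
fit <- lm(mortality ~ time, data = one_country)
summary(fit)

# or overlay the fitted line on the points
ggplot(one_country, aes(time, mortality)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE)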
I have a dataframe where each row in the data represents a matchup in a soccer game. Here is a summary with some columns removed, and only for 50 games of a season:
dput(mydata)
structure(list(home_id = c(75L, 323L, 607L, 3627L, 3645L, 641L,
204L, 111L, 287L, 179L, 1062L, 292L, 413L, 275L, 182L, 3639L,
179L, 2649L, 111L, 478L, 383L, 3645L, 275L, 577L, 3639L, 75L,
413L, 287L, 607L, 3627L, 1062L, 75L, 583L, 323L, 3736L, 577L,
179L, 287L, 275L, 3645L, 3639L, 583L, 179L, 413L, 641L, 204L,
478L, 292L, 607L, 323L), away_id = c(3645L, 3736L, 583L, 2649L,
577L, 75L, 3736L, 182L, 323L, 607L, 3639L, 583L, 478L, 383L,
3645L, 607L, 413L, 204L, 641L, 583L, 3627L, 179L, 182L, 3736L,
292L, 204L, 323L, 1062L, 2649L, 3639L, 204L, 292L, 111L, 607L,
182L, 3645L, 478L, 413L, 641L, 287L, 577L, 182L, 2649L, 1062L,
383L, 111L, 3736L, 3627L, 75L, 275L), home_rating = c(1546.64167937943,
1534.94287021653, 1514.51852002403, 1558.91823781777, 1555.76784458784,
1518.37707748967, 1464.5264202735, 1642.57388443639, 1447.37725553409,
1420.69724095008, 1428.51535356064, 1512.81896541907, 1463.29314217469,
1492.70306452585, 1404.65235407107, 1418.03767059747, 1420.69724095008,
1532.76811278441, 1642.57388443639, 1515.31896572792, 1498.7997953168,
1555.76784458784, 1492.70306452585, 1519.94395373088, 1418.03767059747,
1546.64167937943, 1463.29314217469, 1447.37725553409, 1514.51852002403,
1558.91823781777, 1428.51535356064, 1546.64167937943, 1524.71735294388,
1534.94287021653, 1484.09023843799, 1519.94395373088, 1420.69724095008,
1447.37725553409, 1492.70306452585, 1555.76784458784, 1418.03767059747,
1524.71735294388, 1420.69724095008, 1463.29314217469, 1518.37707748967,
1464.5264202735, 1515.31896572792, 1512.81896541907, 1514.51852002403,
1534.94287021653), away_rating = c(1555.76784458784, 1484.09023843799,
1524.71735294388, 1532.76811278441, 1519.94395373088, 1546.64167937943,
1484.09023843799, 1404.65235407107, 1534.94287021653, 1514.51852002403,
1418.03767059747, 1524.71735294388, 1515.31896572792, 1498.7997953168,
1555.76784458784, 1514.51852002403, 1463.29314217469, 1464.5264202735,
1518.37707748967, 1524.71735294388, 1558.91823781777, 1420.69724095008,
1404.65235407107, 1484.09023843799, 1512.81896541907, 1464.5264202735,
1534.94287021653, 1428.51535356064, 1532.76811278441, 1418.03767059747,
1464.5264202735, 1512.81896541907, 1642.57388443639, 1514.51852002403,
1404.65235407107, 1555.76784458784, 1515.31896572792, 1463.29314217469,
1518.37707748967, 1447.37725553409, 1519.94395373088, 1404.65235407107,
1532.76811278441, 1428.51535356064, 1498.7997953168, 1642.57388443639,
1484.09023843799, 1558.91823781777, 1546.64167937943, 1492.70306452585
)), .Names = c("home_id", "away_id", "home_rating", "away_rating"
), row.names = c(NA, 50L), class = "data.frame")
Here's what it looks like:
> head(mydata)
home_id away_id home_rating away_rating
1 75 3645 1546.642 1555.768
2 323 3736 1534.943 1484.090
3 607 583 1514.519 1524.717
4 3627 2649 1558.918 1532.768
5 3645 577 1555.768 1519.944
6 641 75 1518.377 1546.642
The columns home_rating and away_rating are scores that reflect how good each team is, and I'd like to use these columns in an apply function. In particular, I have another function named use_ratings() that looks like this:
# takes a rating from home and away team, as well as is_cup boolean, returns score
use_ratings <- function(home_rating, away_rating, is_cup = FALSE) {
  if (is_cup) { # if is_cup, it's a neutral-site game
    rating_diff <- -(home_rating - away_rating) / 400
  } else {
    rating_diff <- -(home_rating + 85 - away_rating) / 400
  }
  W_e <- 1 / (10^(rating_diff) + 1)
  return(W_e)
}
I'd like to apply this function over every row of mydata, using the values in the home_rating and away_rating columns as the parameters passed each time to use_ratings(). How can I do this? Thanks.
@SymbolixAU is absolutely right in that the best way to do this (in terms of both speed and readability) is to take advantage of vectorization directly. But if you were to use an "apply function", that function would probably be mapply() or apply():
Using mapply():
mapply(use_ratings, home_rating = mydata$home_rating,
away_rating = mydata$away_rating, is_cup = <a vector of booleans>)
Using apply():
apply(mydata, 1, function(row) use_ratings(row["home_rating"], row["away_rating"], <row["is_cup"], which is missing from the data>))
Multivariate apply (mapply) simultaneously applies a multivariate function to several objects corresponding to its arguments. apply applies a function over the margins of a matrix-like object; setting MARGIN = 1 asks apply to operate on rows. Hence, we had to wrap the call in an anonymous function that operates on a row and feeds the relevant arguments to use_ratings.
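For completeness, a sketch of the direct vectorized call alluded to above. It assumes is_cup is the same value (FALSE) for every match, since mydata has no is_cup column:
# home_rating and away_rating are plain numeric vectors, so the arithmetic
# inside use_ratings() is vectorized over all 50 rows at once
mydata$W_e <- use_ratings(mydata$home_rating, mydata$away_rating, is_cup = FALSE)
head(mydata$W_e)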
I have these functions:
foo <- function(z){
bob <- which(z$signchg!=0)
z$crit1 <- "opening"
ifelse(length(bob)==0, z$crit1 <- "opening",
ifelse(length(bob)==1,
z$crit1[match(min(bob, na.rm=T), as.numeric(rownames(z)))] <- "opening",
z$crit1[match(min(bob, na.rm=T), as.numeric(rownames(z))):match(max(bob[bob!=max(bob, na.rm=T)], na.rm=T), as.numeric(rownames(z)))] <- "unconscious follow"))
z$crit1
}
foo2 <- function(y){
bob <- which(y$signchg!=0)
y$crit2 <- "opening"
ifelse(length(bob)!=0,
ifelse(length(y[y$crit1=="unconscious follow",]$sacc)==0,
y$crit2[match(max(bob, na.rm=T), as.numeric(rownames(y))):nrow(y)] <- "opening",
ifelse(length(head(which(y$sacc>max(y[y$crit1=="unconscious follow",]$sacc, na.rm=T)),1))==0, y$crit2 <- "opening",
y$crit2[match(max(bob, na.rm=T), as.numeric(rownames(y))):head(which(y$sacc>max(y[y$crit1=="unconscious follow",]$sacc, na.rm=T)),1)] <- "unconscious follow")),
y$crit2 <- "opening")
y$crit2
}
foo3 <- function(x){
bob <- which(x$signchg!=0)
x$closing <- "opening"
ifelse(length(bob)!=0,
x$closing[1:match(min(bob), as.numeric(rownames(x)))-1] <- "closing", x$closing <- "opening")
x$closing
}
Data
Following is the data set containing 3 unique Vehicle.IDs (8, 12 and 1179). I took a sample of 50 rows:
> dput(ntraj1oo)
structure(list(Vehicle.ID = c(1179L, 12L, 12L, 1179L, 1179L,
1179L, 8L, 1179L, 1179L, 1179L, 8L, 1179L, 12L, 1179L, 12L, 8L,
1179L, 12L, 1179L, 1179L, 12L, 8L, 8L, 1179L, 1179L, 8L, 8L,
12L, 1179L, 1179L, 12L, 1179L, 8L, 12L, 1179L, 1179L, 1179L,
12L, 1179L, 12L, 1179L, 1179L, 12L, 12L, 8L, 1179L, 12L, 1179L,
12L, 1179L), Frame.ID = c(3145L, 225L, 169L, 3549L, 3258L, 3262L,
289L, 3246L, 3155L, 3316L, 74L, 3124L, 135L, 3398L, 434L, 342L,
3288L, 93L, 3221L, 3384L, 293L, 347L, 452L, 3301L, 3165L, 448L,
230L, 400L, 3343L, 3302L, 305L, 3242L, 333L, 181L, 3362L, 3201L,
3356L, 150L, 3466L, 129L, 3123L, 3513L, 124L, 234L, 265L, 3440L,
407L, 3497L, 454L, 3208L), sacc = c(1.2024142815693, 0.167471842386292,
0.389526218261013, 1.0608535451082, 1.34658348989163, 1.30827746568167,
0.676275947080881, 1.56168338812933, 1.45322442414619, 0.236926713182157,
-0.331746789624733, -0.296457890957575, 0.578696068042145, -0.104188799716241,
1.64373161583451, 0.74974701439042, 1.024635813019, -0.212898242245164,
1.54066066716165, -0.439030115502196, -0.0908376863222584, 0.691762173865882,
0.0956005839166526, 0.681722722129702, 1.44251516088868, -0.0772419385643099,
0.430003386843667, 1.05958689269776, -0.402975701449174, 0.648704793894625,
-0.0106984134869645, 1.63176231974786, 0.884756294567357, 0.219219760305613,
-0.428935665947576, 1.54207226189423, -0.40185390261026, 0.441773747246007,
0.983291264446801, 0.596528992338635, -0.351283490561794, 1.11356697363866,
0.64253447660771, 0.0491453453593057, 0.715465534653409, 0.760489329987362,
1.17711496285387, 1.07374138870048, 1.45061613430159, 1.5589484008358
), relative.v = c(-7.20683108836496, 1.41754770518283, -0.298659684886637,
-6.37538134834612, -4.00321428084874, -3.82309181190075, -0.727408127343359,
-4.14013093963352, -6.7253476528766, 4.84058965232001, -2.51365849828336,
-4.82796782714515, -2.2317642496626, -1.54138020745749, -2.91023536393949,
-0.904299522098896, -0.549568281350204, -2.99526240263305, -6.18033016152812,
1.08350055196426, 2.52903114154146, -1.01292990996659, -2.54795991136474,
2.14686490991681, -7.03361953812604, -1.24128349787506, -0.149590211893916,
-4.29601660568767, 4.70617725169663, 2.47874406770293, -0.442134244952982,
-4.72366659693532, -1.10949949758366, 0.850218831661735, 2.42271763669292,
-8.2259447855115, 1.44195914620509, -1.88517424984066, -6.48099656406857,
-3.22006152601574, -4.53955604248154, -7.95149284172251, -3.95841822705948,
0.978824881565963, -0.832249768583615, -3.99216317969555, -4.56499371815966,
-5.89675705778252, -0.269620247442631, -7.75907851102451), nspacing = c(67.9564390167725,
64.4222965548587, 69.9984793222568, 203.630967606615, 142.825962756316,
144.4974871287, 69.5663930132816, 138.544960496636, 75.1355363890009,
145.313025161387, 62.76071823522, 52.3376957871262, 63.854711706948,
119.303164791766, 82.7183786313178, 78.0100285715123, 151.786017600382,
41.6146093571944, 124.898333310041, 118.810008693412, 57.9329927929634,
78.1975432716604, 97.9377561743831, 151.845647043811, 81.0478415333349,
97.4581470183944, 63.9970348761168, 67.6721711092462, 129.125820950528,
151.636781319948, 56.1796449012404, 136.907951327661, 77.12358891961,
68.5284958380145, 126.438422026932, 109.685235806325, 126.52282899785,
65.3271870401025, 148.692268232249, 62.3990368362372, 51.846063554017,
178.498350166457, 60.768801672643, 62.2994121863875, 69.1176002124943,
135.401524339836, 71.0466952274176, 167.365284062391, 85.027302124975,
115.693182668085)), class = c("tbl_df", "tbl", "data.frame"), .Names = c("Vehicle.ID",
"Frame.ID", "sacc", "relative.v", "nspacing"), row.names = c(901L,
606L, 550L, 1305L, 1014L, 1018L, 261L, 1002L, 911L, 1072L, 46L,
880L, 516L, 1154L, 815L, 314L, 1044L, 474L, 977L, 1140L, 674L,
319L, 424L, 1057L, 921L, 420L, 202L, 781L, 1099L, 1058L, 686L,
998L, 305L, 562L, 1118L, 957L, 1112L, 531L, 1222L, 510L, 879L,
1269L, 505L, 615L, 237L, 1196L, 788L, 1253L, 835L, 964L))
Applying Functions Produces Error
Now, applying the functions on this data gives an error:
ovv <- ntraj1oo %>%
  group_by(Vehicle.ID) %>%
  mutate(signs = sign(relative.v),
         signchg = c(NA, diff(signs))) %>%
  do(data.frame(Frame.ID = .$Frame.ID, crit1 = foo(.), crit2 = foo2(.), closing = foo3(.))) %>%
  inner_join(x = ntraj1oo, y = ., by = c("Vehicle.ID", "Frame.ID")) %>%
  mutate(behavior = ifelse(crit1 == "unconscious follow" | crit2 == "unconscious follow", "Unconscious Following",
                    ifelse(closing == "closing" & relative.v > 0, "closing",
                    ifelse(closing == "closing" & relative.v < 0, "Unconscious Following", "opening")))) %>%
  ungroup()
Error
Error in match(max(bob, na.rm = T), as.numeric(rownames(y))):nrow(y) :
NA/NaN argument
But using the data for 1 vehicle only does not produce the error. I tested for vehicles 8, 12 and 1179 separately and there was no error.
These were only 3 vehicles and 50 rows in total. If I apply the functions to the original data set, which has 944 Vehicle.IDs, I get the following error:
Error in match(min(bob, na.rm = T), as.numeric(rownames(z))):match(max(bob[bob != :
NA/NaN argument
Again, using the complete data for 1 vehicle does not produce any error. Why does dplyr fail to apply the functions when there is more than one Vehicle.ID?
A trusty old workaround when group_by() fails: revert to good old-fashioned row indices and for-loops.
# Create new cols, with pessimism
ovv$crit1 <- NA
ovv$crit2 <- NA
ovv$closing <- NA
ovv$behavior <- NA
for (vid in unique(ovv$Vehicle.ID)) {
# Form a row-index
I <- which(ovv$Vehicle.ID == vid)
# Apply your fns, vid-wise...
ovv[I,]$crit1 <- foo(ovv[I,])
ovv[I,]$crit2 <- foo2(ovv[I,])
ovv[I,]$closing <- foo3(ovv[I,])
}
ovv$behavior <- ifelse(...)
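The loop above assumes ovv already exists and carries the signchg column that foo(), foo2() and foo3() rely on. A minimal sketch of how it could be prepared from the question's own pipeline (this preparation step is an assumption, since the answer does not show it):
library(dplyr)

# compute the sign-change helper columns per vehicle, as in the question's pipeline,
# then drop back to a plain data.frame before running the for-loop
ovv <- ntraj1oo %>%
  group_by(Vehicle.ID) %>%
  mutate(signs = sign(relative.v),
         signchg = c(NA, diff(signs))) %>%
  ungroup() %>%
  as.data.frame()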