How to calculate mean with expanding window - r

I have the data frame below. How can I calculate the mean of column 'value_t' over an expanding window starting from '2014-01-5'? I.e. val(1) = mean(value_t[1:5]), val(2) = mean(value_t[1:6]), val(3) = mean(value_t[1:7]). I would like the algorithm to be efficient (ideally without a loop).
df <- data.frame(date_t = paste('2014-01-', 1:15, sep = ""), value_t = 1:15)
> df
       date_t value_t
1   2014-01-1       1
2   2014-01-2       2
3   2014-01-3       3
4   2014-01-4       4
5   2014-01-5       5
6   2014-01-6       6
7   2014-01-7       7
8   2014-01-8       8
9   2014-01-9       9
10 2014-01-10      10
11 2014-01-11      11
12 2014-01-12      12
13 2014-01-13      13
14 2014-01-14      14
15 2014-01-15      15

How about sapply(5:NROW(df), function(.) mean(df$value_t[1:.]))? It involves a kind of loop (sapply), but it should be reasonably fast.
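As a small hedged variant of my own: vapply() does the same job with a pinned return type, which avoids sapply()'s simplification guesswork at essentially the same speed.
# type-stable version of the expanding mean above
vapply(5:nrow(df), function(i) mean(df$value_t[1:i]), numeric(1))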

Have a look at this
df$val <- cumsum(df$value_t) / 1:nrow(df)
df$val[1:4] <- NA
#      date_t value_t val
#   2014-01-1       1  NA
#   2014-01-2       2  NA
#   2014-01-3       3  NA
#   2014-01-4       4  NA
#   2014-01-5       5 3.0
#   2014-01-6       6 3.5
#   2014-01-7       7 4.0
#   2014-01-8       8 4.5
#   2014-01-9       9 5.0
#  2014-01-10      10 5.5
#  2014-01-11      11 6.0
#  2014-01-12      12 6.5
#  2014-01-13      13 7.0
#  2014-01-14      14 7.5
#  2014-01-15      15 8.0
If you just want the vector, and you don't want to tamper with df, do
val <- (cumsum(df$value_t) / 1:nrow(df))[-(1:4)]
# 3.0 3.5 4.0 4.5 5.0 5.5 6.0 6.5 7.0 7.5 8.0
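If the tidyverse is already in play, dplyr's cummean() expresses the same idea directly. A hedged sketch of my own, not from the original answer:
library(dplyr)
df %>% mutate(val = ifelse(row_number() >= 5, cummean(value_t), NA))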

The sapply(...) solution is faster than the for(...) loop, but only just (about 2%, well within the margin of error). It turns out that extracting the column from the data frame at every step slows things down considerably: if you grab that column as a vector first, you get a ~25% improvement.
df <- data.frame(value = 1:1e4)
f.sapply <- function() sapply(5:nrow(df), function(.) mean(df$value[1:.]))
f.loop <- function() {
  result <- numeric(nrow(df) - 4)
  for (i in 5:nrow(df)) result[i - 4] <- mean(df$value[1:i])
  result
}
f.vec <- function() {
  vec <- df$value
  sapply(5:nrow(df), function(.) mean(vec[1:.]))
}
# do they produce identical results?
identical(f.sapply(),f.loop())
# [1] TRUE
identical(f.sapply(),f.vec())
# [1] TRUE
# which is faster?
library(microbenchmark)
microbenchmark(f.sapply(),f.loop(),f.vec())
# Unit: milliseconds
#       expr      min       lq   median        uq      max neval
# f.sapply() 904.2934 929.7361 947.7621  978.8775 1496.455   100
#   f.loop() 927.5288 950.3632 963.5926 1012.2407 1347.889   100
#    f.vec() 669.5615 697.3639 711.1498  751.2634 1060.056   100
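For completeness, the fully vectorized cumsum() approach from the answer above sidesteps the repeated mean() calls entirely and should beat all three by a wide margin. A hedged sketch of my own; timings not re-run here:
f.cumsum <- function() (cumsum(df$value) / seq_len(nrow(df)))[-(1:4)]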

Is there an efficient alternative to table()?

I use the following command:
table(factor("list",levels=1:"n")
with "list": (example) a = c(1,3,4,4,3)
and levels = 1:5, to also take the 2 and 5 into consideration.
For really big datasets, my code seems to be very ineffective.
Does anyone know a hidden library or a code snippet to make it faster?
We could use fnobs from collapse, which is efficient:
library(collapse)
fnobs(df, g = df$X1)
In base R, tabulate is more efficient than table:
tabulate(df$X1)
[1]  9  6 15 13 11  9  7  9 11 10
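One caveat worth flagging (my addition, not part of the original answer): tabulate() drops trailing zero counts unless nbins is given explicitly, which matters for the asker's levels = 1:5 example:
a <- c(1, 3, 4, 4, 3)
tabulate(a)            # counts run only up to max(a)
# [1] 1 0 2 2
tabulate(a, nbins = 5) # keeps the empty fifth bin
# [1] 1 0 2 2 0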
We could also use janitor::tabyl:
library(janitor)
df %>%
  tabyl(X1) %>%
  adorn_totals()
X1 n percent
1 9 0.09
2 6 0.06
3 15 0.15
4 13 0.13
5 11 0.11
6 9 0.09
7 7 0.07
8 9 0.09
9 11 0.11
10 10 0.10
Total 100 1.00
It's not exactly what you are looking for, but perhaps you can use this:
library(dplyr)
set.seed(8192)
df <- data.frame(X1 = sample(1:10, 100, replace = TRUE))
df %>%
  count(X1)
returns
X1 n
1 1 9
2 2 6
3 3 15
4 4 13
5 5 11
6 6 9
7 7 7
8 8 9
9 9 11
10 10 10
If you need to count more numbers (including missing ones), you could use
library(tidyr)
library(dplyr)
df2 <- data.frame(X1 = 1:12)
df %>%
  count(X1) %>%
  right_join(df2, by = "X1") %>%
  mutate(n = replace_na(n, 0L))
to get
X1 n
1 1 9
2 2 6
3 3 15
4 4 13
5 5 11
6 6 9
7 7 7
8 8 9
9 9 11
10 10 10
11 11 0
12 12 0
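The same gap-filling can be done in one step with tidyr::complete(). A hedged sketch of my own, with tidyr and dplyr loaded as above:
df %>%
  count(X1) %>%
  complete(X1 = 1:12, fill = list(n = 0L))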
TL;DR: the winner is base::tabulate.
Summing up: since the main objective was performance, I prepared a microbenchmark of all the provided solutions, using a small vector and a bigger one as two different scenarios. For the collapse package I had to install the newest Rcpp (1.0.7) on my machine to suppress crashes. Even the Rcpp solution I added is slower than base::tabulate.
suppressMessages(library(janitor))
suppressMessages(library(collapse))
suppressMessages(library(dplyr))
suppressMessages(library(cpp11))
# source https://stackoverflow.com/questions/31001392/rcpp-version-of-tabulate-is-slower-where-is-this-from-how-to-understand
Rcpp::cppFunction('IntegerVector tabulate_rcpp(const IntegerVector& x, const unsigned max) {
  IntegerVector counts(max);
  for (auto& now : x) {
    if (now > 0 && now <= max)
      counts[now - 1]++;
  }
  return counts;
}')
set.seed(1234)
a = c(1,3,4,4,3)
levels = 1:5
df <- data.frame(X1 = a)
microbenchmark::microbenchmark(
  tabulate_rcpp = {tabulate_rcpp(df$X1, max(df$X1))},
  base_table = {base::table(factor(df$X1, 1:max(df$X1)))},
  stats_aggregate = {stats::aggregate(. ~ X1, cbind(df, n = 1), sum)},
  graphics_hist = {hist(df$X1, plot = FALSE, right = FALSE)[c("breaks", "counts")]},
  janitor_tably = {adorn_totals(tabyl(df, X1))},
  collapse_fnobs = {fnobs(df, df$X1)},
  base_tabulate = {tabulate(df$X1)},
  dplyr_count = {count(df, X1)})
#> Unit: microseconds
#>            expr      min        lq       mean    median        uq       max neval
#>   tabulate_rcpp    2.959    5.9800   17.42326    7.9465    9.5435   883.561   100
#>      base_table   48.524   59.5490   72.42985   66.3135   78.9320   153.216   100
#> stats_aggregate  829.324  891.7340 1069.86510  937.4070 1140.0345  2883.025   100
#>   graphics_hist  148.561  170.5305  221.05290  188.9570  228.3160   958.619   100
#>   janitor_tably 6005.490 6439.6870 8137.82606 7497.1985 8283.3670 53352.680   100
#>  collapse_fnobs   14.591   21.9790   32.63891   27.2530   32.6465   417.987   100
#>   base_tabulate    1.879    4.3310    5.68916    5.5990    6.6210    16.789   100
#>     dplyr_count 1832.648 1969.8005 2546.17131 2350.0450 2560.3585  7210.992   100
df <- data.frame(X1 = sample(1:5, 1000, replace = TRUE))
microbenchmark::microbenchmark(
  tabulate_rcpp = {tabulate_rcpp(df$X1, max(df$X1))},
  base_table = {base::table(factor(df$X1, 1:max(df$X1)))},
  stats_aggregate = {stats::aggregate(. ~ X1, cbind(df, n = 1), sum)},
  graphics_hist = {hist(df$X1, plot = FALSE, right = FALSE)[c("breaks", "counts")]},
  janitor_tably = {adorn_totals(tabyl(df, X1))},
  collapse_fnobs = {fnobs(df, df$X1)},
  base_tabulate = {tabulate(df$X1)},
  dplyr_count = {count(df, X1)})
#> Unit: microseconds
#>            expr      min        lq       mean    median        uq       max neval
#>   tabulate_rcpp    4.847    8.8465   10.92661   10.3105   12.6785    28.407   100
#>      base_table   83.736  107.2040  121.77962  118.8450  129.9560   184.427   100
#> stats_aggregate 1027.918 1155.9205 1338.27752 1246.6205 1434.8990  2085.821   100
#>   graphics_hist  209.273  237.8265  274.60654  258.9260  300.3830   523.803   100
#>   janitor_tably 5988.085 6497.9675 7833.34321 7593.3445 8422.6950 13759.142   100
#>  collapse_fnobs   26.085   38.6440   51.89459   47.8250   57.3440   333.034   100
#>   base_tabulate    4.501    6.7360    8.09408    8.2330    9.2170    11.463   100
#>     dplyr_count 1852.290 2000.5225 2374.28205 2145.9835 2516.7940  4834.544   100
Created on 2021-08-01 by the reprex package (v2.0.0)
A base R option using aggregate (borrowing df from @Martin Gal):
> aggregate(. ~ X1, cbind(df, n = 1), sum)
X1 n
1 1 9
2 2 6
3 3 15
4 4 13
5 5 11
6 6 9
7 7 7
8 8 9
9 9 11
10 10 10
Another option is using hist:
> hist(df$X1, plot = FALSE, right = FALSE)[c("breaks", "counts")]
$breaks
 [1]  1  2  3  4  5  6  7  8  9 10
$counts
[1]  9  6 15 13 11  9  7  9 21
(Note the last count: with these breaks and right = FALSE, the final interval [9, 10] is closed on both ends, so the 9s and 10s are lumped together as 11 + 10 = 21.)
Here is one more: summarytools.
Data from Martin Gal, many thanks:
library(summarytools)
set.seed(8192)
df <- data.frame(X1 = sample(1:10, 100, replace = TRUE))
summarytools::freq(df$X1, cumul=FALSE)
Output:
Freq % Valid % Total
----------- ------ --------- ---------
1 9 9.00 9.00
2 6 6.00 6.00
3 15 15.00 15.00
4 13 13.00 13.00
5 11 11.00 11.00
6 9 9.00 9.00
7 7 7.00 7.00
8 9 9.00 9.00
9 11 11.00 11.00
10 10 10.00 10.00
<NA> 0 0.00
Total 100 100.00 100.00
If a faster alternative to table() is required, including cross-tabulation, collapse::qtab(), available since v1.8.0 (May 2022), is a faithful and noticeably faster alternative. fcount() can also be used in the univariate case and returns a data.frame.
library(collapse) # >= v1.8.0, and >= v1.9.0 for fcount()
library(microbenchmark)
v = sample(10000, 1e6, TRUE)
microbenchmark(qtab(v, sort = FALSE), fcount(v), tabulate(v), times = 10)
Unit: milliseconds
                  expr      min       lq     mean   median       uq      max neval
 qtab(v, sort = FALSE) 1.911707 1.945245 2.002473 1.963654 2.027942 2.207891    10
             fcount(v) 1.885549 1.906746 1.978894 1.932310 2.103997 2.138027    10
           tabulate(v) 2.321543 2.323716 2.333839 2.328206 2.334499 2.372506    10
v2 = sample(10000, 1e6, TRUE)
microbenchmark(qtab(v, v2), qtab(v, v2, sort = FALSE), table(v, v2), times = 10)
Unit: milliseconds
                      expr       min        lq      mean   median        uq      max neval
               qtab(v, v2)  45.61279  51.14840  74.16168  60.7761  72.86385 157.6501    10
 qtab(v, v2, sort = FALSE)  41.30812  49.66355  57.02565  51.3568  54.69859 118.1289    10
              table(v, v2) 281.60079 282.85273 292.48119 286.0535 288.19253 349.5513    10
That being said, tabulate() is pretty much as fast as it gets as far as C code is concerned. But it has a clear caveat, which is that it does not hash the values at all, but determines the maximum value and allocates a results vector of that length, using it as a table to count values. Consider this:
v[10] = 1e7L # Adding a random large value here
length(tabulate(v))
[1] 10000000
length(table(v))
[1] 10001
length(qtab(v))
[1] 10001
So you get a result vector with roughly 9.99 million zeros, and performance deteriorates:
microbenchmark(qtab(v, sort = FALSE), fcount(v), tabulate(v), times = 10)
Unit: milliseconds
                  expr      min       lq     mean   median       uq       max neval
 qtab(v, sort = FALSE) 1.873249 1.900473 1.966721 1.923064 2.064186  2.126588    10
             fcount(v) 1.829338 1.850330 1.926676 1.880199 2.021013  2.057667    10
           tabulate(v) 4.207789 4.357439 5.066296 4.417012 4.558216 10.347744    10
In light of this, the fact that qtab() actually does hash every value and achieves this performance is rather remarkable.

R Compute Statistics on Lagged Partitions

I have a data.frame with one column containing categorical data, one column containing dates, and one column containing numeric values. For simplicity, see the sample below:
A B C
1 L 2015-12-01 5.7
2 M 2015-11-30 2.1
3 K 2015-11-01 3.2
4 L 2015-10-05 5.7
5 M 2015-12-05 1.2
6 L 2015-11-15 2.3
7 L 2015-12-03 4.4
I would like to, for each category in A, compute a lagging average (e.g. average of the previous 30 days' values in column C).
I cannot for the life of me figure this one out. I have tried using sapply with a custom function that subsets the data.frame on category and date (or a deep copy of it) and returns the statistic (think mean or sd). That works fine for single values, but it returns all NAs from inside sapply.
Any help you can give is appreciated.
This could be done more compactly, but here I have drawn it out to make it easier to understand. The core is the split, the lapply/apply, and then putting it back together. It uses a date window rather than a sorting-based solution, so it is very general. I also put the object back in its original order to enable direct comparison.
# set up the data
set.seed(100)
# create a data.frame with about a two-month period for each category of A
df <- data.frame(A = rep(c("K", "L", "M"), each = 60),
                 B = rep(seq(as.Date("2015-01-01"), as.Date("2015-03-01"), by = "days"), 3),
                 C = round(runif(180) * 6, 1))
head(df)
## A B C
## 1 K 2015-01-01 1.8
## 2 K 2015-01-02 1.5
## 3 K 2015-01-03 3.3
## 4 K 2015-01-04 0.3
## 5 K 2015-01-05 2.8
## 6 K 2015-01-06 2.9
tail(df)
## A B C
## 175 M 2015-02-24 4.8
## 176 M 2015-02-25 2.0
## 177 M 2015-02-26 5.7
## 178 M 2015-02-27 3.9
## 179 M 2015-02-28 2.8
## 180 M 2015-03-01 3.6
# preserve original order
df$originalOrder <- 1:nrow(df)
# randomly shuffle the row order
randomizedOrder <- order(runif(nrow(df)))
df <- df[randomizedOrder, ]
# split on A - your own data might need coercion of A to a factor
df.split <- split(df, df$A)
# set the window size
window <- 30
# compute the moving average
listD <- lapply(df.split, function(tmp) {
  apply(tmp, 1, function(x) {
    mean(tmp$C[tmp$B <= as.Date(x["B"]) & tmp$B > (as.Date(x["B"]) - window)])
  })
})
# combine the result with the original data
result <- cbind(do.call(rbind, df.split), rollingMean = unlist(listD))
# and tidy up:
# return to original order
result <- result[order(result$originalOrder), ]
result$originalOrder <- NULL
# remove the row names
row.names(result) <- NULL
result[c(1:5, 59:65), ]
## A B C rollingMean
## 1 K 2015-01-01 1.8 1.800000
## 2 K 2015-01-02 1.5 1.650000
## 3 K 2015-01-03 3.3 2.200000
## 4 K 2015-01-04 0.3 1.725000
## 5 K 2015-01-05 2.8 1.940000
## 59 K 2015-02-28 3.6 3.080000
## 60 K 2015-03-01 1.3 3.066667
## 61 L 2015-01-01 2.8 2.800000
## 62 L 2015-01-02 3.9 3.350000
## 63 L 2015-01-03 5.8 4.166667
## 64 L 2015-01-04 4.1 4.150000
## 65 L 2015-01-05 2.7 3.860000
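A more compact route to the same result: dplyr plus the slider package can express the per-category 30-day lagging mean directly. This is a hedged sketch of my own, not from the original answer; it assumes the df built above, with .before = 29 matching the (date - 30, date] window used there.
library(dplyr)
library(slider)
df %>%
  group_by(A) %>%
  arrange(B, .by_group = TRUE) %>%
  mutate(rollingMean = slide_index_dbl(C, B, mean, .before = 29)) %>%
  ungroup()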

Calculate mean of every n rows in a data frame in R when the starting row varies

First make some example data:
df = data.frame(matrix(rnorm(200), nrow=100))
df1=data.frame(t(c(25,34)))
The starting row is different for each column: in X1 I would like to start from the 25th row, while in X2 from row 34. Then I want to calculate the mean of every 5 values over the next 50 rows, for all the columns in df.
I am new to R so this is probably very obvious. Can anyone provide some suggestions on how I can do this?
You could try Map.
lst <- Map(function(x, y) {
  x1 <- x[y:length(x)]
  tapply(x1, as.numeric(gl(length(x1), 5, length(x1))), FUN = mean)
}, df, df1)
lst
# $X1
#           1           2           3           4           5           6
# -0.16500158  0.11339623 -0.86961872 -0.54985564  0.19958461  0.35234983
#           7           8           9          10          11          12
#  0.32792769  0.65989801 -0.30409184 -0.53264725 -0.45792792 -0.59139844
#          13          14          15          16
#  0.03934133 -0.38068187  0.10100007  1.21017392
#
# $X2
#           1           2           3           4           5           6
#  0.24525622  0.07367300  0.18733973 -0.43784202 -0.45756095 -0.45740178
#           7           8           9          10          11          12
# -0.54086152  0.10439072  0.65660937  0.70623380 -0.51640088  0.46506135
#          13          14
# -0.09428336 -0.86295101
Because of the length difference, it might be better to keep it as a list. But, if you need it in a matrix/data.frame, you can make the lengths equal by padding with NAs.
do.call(cbind, lapply(lst, `length<-`, max(sapply(lst, length))))
Update
If you need only 50 rows, then change y:length(x) to y:(y + 49) in the Map code.
data
set.seed(24)
df <- data.frame(matrix(rnorm(200), nrow=100))
df1 <- data.frame(t(c(25,34)))
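For the fixed-size case there is also a base-R shortcut: reshape the 50 values after each start row into a 5-row matrix and take column means. A hedged sketch of my own (block_means is a hypothetical helper, and it assumes exactly 50 usable values, i.e. complete blocks of 5):
block_means <- function(x, start, n = 50, block = 5) {
  # take n values from the start row, fold into block-length columns, average each
  colMeans(matrix(x[start:(start + n - 1)], nrow = block))
}
Map(block_means, df, df1)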
It's not entirely clear what you are after (especially the second line of your code), but I think this might be close to what you want to do:
every_fifth_row <- df[seq(1, nrow(df), 5), ]
every_fifth_row
# X1 X2
# 1 -0.09490455 -0.28417104
# 6 -0.14949662 0.12857284
# 11 0.15297366 -0.84428186
# 16 -1.03397309 0.04775516
# 21 -1.95735213 -1.03750794
# 26 1.61135194 1.10189370
# 31 0.12447365 1.80792719
# 36 -0.92344017 0.66639710
# 41 -0.88764143 0.10858376
# 46 0.27761464 0.98382526
# 51 -0.14503359 -0.66868956
# 56 -1.70208187 0.05993688
# 61 0.33828525 1.00208639
# 66 -0.41427863 1.07969341
# 71 0.35027994 -1.46920059
# 76 1.38943839 0.01844205
# 81 -0.81560917 -0.32133221
# 86 1.38188423 -0.77755471
# 91 1.53247872 -0.98660308
# 96 0.45721909 -0.22855622
rowMeans(every_fifth_row)
colMeans(every_fifth_row)
# Alternative
# apply(every_fifth_row, 1, mean) # Row-wise mean
# apply(every_fifth_row, 2, mean) # Column-wise mean

How to approach loop with increasing variable name in R

My dataset is currently a set of answers to twenty questions with 300 observations.
Each of the questions is labeled q1, q2, q3, etc.
Each observation gives a 1 to 10 response.
The code below is what I have. What I want is for the q1 to change when the counter changes in R.
totaltenq1 <- sum(UpdatedQualtrix$tenq1)
totalnineq1 <- sum(UpdatedQualtrix$nineq1)
totaleightq1 <- sum(UpdatedQualtrix$eightq1)
totalsevenq1 <- sum(UpdatedQualtrix$sevenq1)
totalsixq1 <- sum(UpdatedQualtrix$sixq1)
totalfiveq1 <- sum(UpdatedQualtrix$fiveq1)
totalfourq1 <- sum(UpdatedQualtrix$fourq1)
totalthreeq1 <- sum(UpdatedQualtrix$threeq1)
totaltwoq1 <- sum(UpdatedQualtrix$twoq1)
totaloneq1 <- sum(UpdatedQualtrix$oneq1)
totaltenq2 <- sum(UpdatedQualtrix$tenq2)
totalnineq2 <- sum(UpdatedQualtrix$nineq2)
totaleightq2 <- sum(UpdatedQualtrix$eightq2)
totalsevenq2 <- sum(UpdatedQualtrix$sevenq2)
totalsixq2 <- sum(UpdatedQualtrix$sixq2)
totalfiveq2 <- sum(UpdatedQualtrix$fiveq2)
totalfourq2 <- sum(UpdatedQualtrix$fourq2)
totalthreeq2 <- sum(UpdatedQualtrix$threeq2)
totaltwoq2 <- sum(UpdatedQualtrix$twoq2)
totaloneq2 <- sum(UpdatedQualtrix$oneq2)
I would like to have code that is roughly (pseudocode):
count <- 20
for (i in 1:count) {
  totaltenq(i) <- sum(UpdatedQualtrix$tenq(i))
  totalnineq(i) <- sum(UpdatedQualtrix$nineq(i))
  # etc.
}
That way, when I do it again in the future, I can tell R how many questions there are and it will adapt, so I don't end up with 10,000 lines of code from copying and pasting my code 20 times.
I don't think you need any loops at all; it all depends on how you want to store those values. I'm a big fan of not having more variables than necessary.
Here's some sample data. I'll just make 10 rows (observations) with values 1-5.
set.seed(15)
Q<-3
numbs<-c("one","two","three","four","five","six","seven","eight","nine","ten")
qs<-paste0("q",1:Q)
qnumbs <- outer(numbs, qs, paste0)
UpdatedQualtrix <- data.frame(ID = 1:10,
                              matrix(sample(1:5, 10 * length(numbs) * Q, replace = TRUE), nrow = 10))
colnames(UpdatedQualtrix) <- c("ID", qnumbs)
Now I can sum up each of the columns with
( Qsums<-colSums(UpdatedQualtrix[, qnumbs]) )
# oneq1 twoq1 threeq1 fourq1 fiveq1 sixq1 sevenq1 eightq1 nineq1 tenq1
# 37 35 29 26 32 39 40 33 40 26
# oneq2 twoq2 threeq2 fourq2 fiveq2 sixq2 sevenq2 eightq2 nineq2 tenq2
# 37 31 19 29 25 38 36 35 28 27
# oneq3 twoq3 threeq3 fourq3 fiveq3 sixq3 sevenq3 eightq3 nineq3 tenq3
# 37 30 31 31 24 31 29 31 25 41
And if we want the totals per question we can do
sapply(qs, function(a, b) sum(Qsums[paste0(b,a)]), b=numbs)
# q1 q2 q3
# 337 305 310
Or if we want the counts per response we can do
sapply(numbs, function(a, b) sum(Qsums[paste0(a,b)]), b=qs)
# one two three four five six seven eight nine ten
# 111 96 79 86 81 108 105 99 93 94
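If you really do want one named total per column, as in the question, you can build the column names with paste0() and index with [[ ]] instead of $, collecting everything in a named list rather than loose variables. A hedged sketch of my own, reusing Q and numbs from above:
totals <- list()
for (i in 1:Q) {
  for (w in numbs) {
    col <- paste0(w, "q", i)   # e.g. "tenq1"
    totals[[paste0("total", col)]] <- sum(UpdatedQualtrix[[col]])
  }
}
totals$totaltenq1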
You might also want to consider melting your data, since it is so structured. The reshape2 library can help. You can do
require(reshape2)
mm <- melt(UpdatedQualtrix, id.vars="ID")
mm <- cbind(mm[,-2], colsplit(mm$variable, "q", c("resp","q")))
mm$resp <- factor(mm$resp, levels=numbs)
to turn your data into a "tall" format, so each value has its own row with columns for ID, value, response and question.
str(mm)
# 'data.frame': 300 obs. of 4 variables:
# $ ID : int 1 2 3 4 5 6 7 8 9 10 ...
# $ value: int 4 1 5 4 2 5 5 2 4 5 ...
# $ resp : Factor w/ 10 levels "one","two","three",..: 1 1 1 1 1 1 1 1 1 1 ...
# $ q : int 1 1 1 1 1 1 1 1 1 1 ...
And then we can more easily do other calculations. If you want the total scores by question, you could do
aggregate(value~q, mm, sum)
# q value
# 1 1 337
# 2 2 305
# 3 3 310
If you wanted the average value for each question/response you could do
with(mm, tapply(value, list(q,resp), mean))
# one two three four five six seven eight nine ten
# 1 3.7 3.5 2.9 2.6 3.2 3.9 4.0 3.3 4.0 2.6
# 2 3.7 3.1 1.9 2.9 2.5 3.8 3.6 3.5 2.8 2.7
# 3 3.7 3.0 3.1 3.1 2.4 3.1 2.9 3.1 2.5 4.1
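The same summaries fall out of dplyr, too. A hedged sketch of my own, reusing mm from above:
library(dplyr)
mm %>% group_by(q) %>% summarise(total = sum(value))                          # totals per question
mm %>% group_by(q, resp) %>% summarise(avg = mean(value), .groups = "drop")   # means per question/response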

R Programming Calculate Rows Average

How can I use R to calculate row means?
Sample data:
f<- data.frame(
name=c("apple","orange","banana"),
day1sales=c(2,5,4),
day1sales=c(2,8,6),
day1sales=c(2,15,24),
day1sales=c(22,51,13),
day1sales=c(5,8,7)
)
Expected Results :
Expected results: the table will gain more columns over time. For now the average should cover up to day1sales.4; after more data come in it will extend to day1sales.6 and so on. So how can I compute the average across all the sales columns for each row?
With rowMeans:
> rowMeans(f[-1])
## [1] 6.6 17.4 10.8
You can also add a column of means to the data set:
> f$AvgSales <- rowMeans(f[-1])
> f
## name day1sales day1sales.1 day1sales.2 day1sales.3 day1sales.4 AvgSales
## 1 apple 2 2 2 22 5 6.6
## 2 orange 5 8 15 51 8 17.4
## 3 banana 4 6 24 13 7 10.8
rowMeans is the simplest way. The function apply will also apply a function along the rows or columns of a data frame. In this case you want to apply the mean function to the rows:
f$AverageSales <- apply(f[, 2:length(f)], 1, mean)
(changed 6 to length(f) since you say you may add more columns).
This will add an AverageSales column to the data frame f with the values that you want:
> f
## name day1sales day1sales.1 day1sales.2 day1sales.3 day1sales.4 AverageSales
##1 apple 2 2 2 22 5 6.6
##2 orange 5 8 15 51 8 17.4
##3 banana 4 6 24 13 7 10.8
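Since more day columns will keep arriving, a slightly more defensive variant (a hedged sketch of my own) selects the sales columns by name pattern, so a future non-sales column cannot sneak into the mean:
# select only columns whose names start with "day", then average row-wise
f$AvgSales <- rowMeans(f[grepl("^day", names(f))])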
