Find a range in R and replace it with the median

I have a data frame called data_v and one of its columns is salaries. The values range between 0 and 140 000. I want to define ranges (range 1: 0-10000, range 2: 10000-20000, ...), calculate the median of each range, and replace every value in a range with that range's median.
Using this I am able to get the desired output:
first = data_v$salaries[data_v$salaries>=0 & data_v$salaries<10000]
data_v$salaries[data_v$salaries>=0 & data_v$salaries<10000] = median(first)
second = data_v$salaries[data_v$salaries>=10000 & data_v$salaries<20000]
data_v$salaries[data_v$salaries>=10000 & data_v$salaries<20000] = median(second)
.............
ten=data_v$salaries[data_v$salaries>=90000 & data_v$salaries<=100000]
data_v$salaries[data_v$salaries >= 90000 & data_v$salaries <= 100000] = median(ten)
Output:
table(data_v$salaries)
# medians:  7949 17523 25939 34302 42827 56840 65423 73292 81900 95479.75
# counts:    130  2022  8481  9233  2661  1270  3864  2232   176        4
I tried to implement the same thing with a while loop, without success:
i <- 0
while (i <= 140000) {
  m = data_v$salaries[data_v$salaries >= i & data_v$salaries < (i + 10000)]
  data_v$salaries[data_v$salaries >= i & data_v$salaries < (i + 10000)] = median(m)
  i <- i + 10000
}
Any help/suggestions are more than welcome.

data(mtcars) # data for test
step = 10 # interval length, 10000 for your data
n = ceiling(max(mtcars$mpg)/step) # number of intervals
mtcars$mpg_interval = cut(mtcars$mpg, step*(0:n))
mtcars$mpg_median = ave(mtcars$mpg, mtcars$mpg_interval, FUN = median)
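Applied to your data, the same two lines work directly; a sketch, assuming data_v$salaries holds the raw salary values:
step = 10000                                   # interval length
n = ceiling(max(data_v$salaries)/step)         # number of intervals
data_v$interval = cut(data_v$salaries, step*(0:n),
                      right = FALSE, include.lowest = TRUE)
data_v$salaries = ave(data_v$salaries, data_v$interval, FUN = median)
right = FALSE makes the intervals left-closed, matching your salaries >= i & salaries < i + 10000 comparisons, and include.lowest = TRUE keeps the maximum salary (exactly 140 000) from becoming NA.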

Related

Simulating data sample

I have the following probabilities for each group, and each group represents a certain range of values. My goal is to simulate 1,234 rows of data that correspond with the groups and percentages:
ages = c(21:29, 30:39,40:49, 50:59, 60:69, 70:79, 80:89, 90:99)
age_probs = c(10.85,12.64,14.02,25.00,19.01,11.45,7.01,0.01) / 100
age_bins = sapply(list(21:29, 30:39,40:49, 50:59, 60:69, 70:79, 80:89, 90:99), length)
age_weighted = rep(age_probs/age_bins, age_bins)
set.seed(1)
n = 1234
data = data.frame(ID = sample(n),
Age = sample(ages, size = n, prob = age_weighted, replace = TRUE))
However, the percentages in the simulated data don't match the targets and are sometimes quite different (I assume because the data isn't big enough). I found another post which mentions that this happens because our "view" of the randomness is effectively "one cell at a time" instead of "one column at a time". This is in reference to the sample() function.
How can I change my sample function to better represent the population percentages?
Oh and here is how I checked the columns of my data frame
to_export = data[order(data$ID),]
for (i in 1:length(to_export$Age)) {
  if (to_export$Age[i] >= 21 & to_export$Age[i] <= 29) to_export$block[i] = "21-29"
  if (to_export$Age[i] >= 30 & to_export$Age[i] <= 39) to_export$block[i] = "30-39"
  if (to_export$Age[i] >= 40 & to_export$Age[i] <= 49) to_export$block[i] = "40-49"
  if (to_export$Age[i] >= 50 & to_export$Age[i] <= 59) to_export$block[i] = "50-59"
  if (to_export$Age[i] >= 60 & to_export$Age[i] <= 69) to_export$block[i] = "60-69"
  if (to_export$Age[i] >= 70 & to_export$Age[i] <= 79) to_export$block[i] = "70-79"
  if (to_export$Age[i] >= 80 & to_export$Age[i] <= 89) to_export$block[i] = "80-89"
  if (to_export$Age[i] >= 90) to_export$block[i] = "90+"
}
#to_export
age_table = to_export %>% group_by(block) %>% summarise(percentage = round(n()/1234 * 100,2))
age_table
I suggest a small redesign. I'm using dplyr and ggplot2 here, but strictly speaking they aren't needed:
set.seed(1)
n = 1234
# Definition of the age buckets
ages = c("21:29", "30:39","40:49", "50:59", "60:69", "70:79", "80:89", "90:99")
# probability for each bucket
age_probs = c(10.85,12.64,14.02,25.00,19.01,11.45,7.01,0.01)
# normalise the probabilities since they don't add up to 1
c_age_probs = cumsum(age_probs)/sum(age_probs)
# create the data.frame
data = data.frame(ID = 1:n,
Age = ages[findInterval(runif(n), c_age_probs) + 1])
# plotting the data
ggplot(data, aes(x=Age)) +
geom_bar()
The plot of the data looks okay, given the probabilities. Let's take a look at the percentages:
# getting the percentage
data %>%
group_by(Age) %>%
summarise(percentage = n()/n)
# A tibble: 7 x 2
# Age percentage
# <chr> <dbl>
# 1 21:29 0.0989
# 2 30:39 0.105
# 3 40:49 0.133
# 4 50:59 0.269
# 5 60:69 0.198
# 6 70:79 0.126
# 7 80:89 0.0705
The key part is ages[findInterval(runif(n), c_age_probs) + 1]. I created some uniformly distributed numbers and used the cumulative (and normalised) probabilities to get the corresponding age bucket. By doing so I didn't even need to create multiple case_when statements.
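If you would rather keep sample(), the same result can be drawn from the bucket labels directly; a sketch using the ages and age_probs vectors defined just above:
data = data.frame(ID = 1:n,
                  Age = sample(ages, size = n, replace = TRUE,
                               prob = age_probs/sum(age_probs)))
sample() normalises prob internally, so the division by sum(age_probs) is optional. Each row is still an independent draw, so the realised percentages will show some sampling noise at n = 1234, just like the findInterval() version.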

optimize R code for min() and sample() by group

I generate a network with npeople (= 80) people and ncomp (= 4) components, and I want each component to have density equal to dens (= 0.2).
I want to optimize the 2 lines of code that take most of the time (especially if I want to have 5k people in the network).
The 2 lines are:
# adjust probability to keep density
nodes[,p:= as.numeric(min(c(1, p * (1/(mean(nodes$p) / c.dens))))), by = c("ID","ALTERID")]
# simulate edges
nodes[, edge := sample(c(0,1),1, prob = c(1-p,p)), by = c("ID","ALTERID")]
I have tried using the lapply() function, but the execution time increased; see the line of code below:
nodes[,lapply(.SD, function(p) min(c(1, p * (1/(mean(nodes$p) / c.dens))))), by = c("ID","ALTERID")]
rm(list=ls())
library(data.table)
library(intergraph)
library(igraph)
library(Matrix)
library(profvis)
library(ggplot2)
draw.var <- function(n, var1, rho, mean){
  C <- matrix(rho, nrow = 2, ncol = 2)
  diag(C) <- 1
  C <- chol(C)
  S <- rnorm(n, mean = mean)
  S <- cbind(scale(var1)[1:n], S)
  ZS <- S %*% C
  return(ZS[,2])
}
set.seed(1123)
profvis({
  # create empty list to store data
  dt.list <- list()
  npeople <- 500
  dens <- .2
  OC.impact <- FALSE
  cor_iv_si <- .6
  cor_iv_uc <- 0
  cor_uc_oc <- 0.6
  ncomp <- 4
  beta_oc <- 2 # observed characteristics
  beta_uc <- 2 # unobserved characteristics
  beta_si <- 1
  # create data.table
  dt.people <- data.table(ego = 1:npeople)
  # draw observed characteristics
  dt.people[, OC := abs(rt(npeople, 2))]
  # draw unobserved variable
  dt.people[, UC := draw.var(npeople, dt.people$OC, rho = cor_uc_oc, mean = 5)]
  # set component identifier
  dt.people$group <- cut_number(dt.people$UC, ncomp, labels = F)
  for (q in 1:ncomp) {
    # subset comp
    dt.sub <- dt.people[group == q]
    # create undirected graph
    nodes <- as.data.table(t(combn(dt.sub$ego, 2)))
    setnames(nodes, c("ID", "ALTERID"))
    # add attributes
    nodes <- merge(nodes, dt.people[, list(ID = ego, ID.UC = UC, ID.OC = OC)], by = "ID")
    nodes <- merge(nodes, dt.people[, list(ALTERID = ego, ALTERID.UC = UC, ALTERID.OC = OC)], by = "ALTERID")
    # calculate distance
    nodes[, d := abs(ID.UC - ALTERID.UC)]
    # estimate the appropriate density per component
    n.edges <- (dens * (npeople * (npeople - 1))) / ncomp
    n.nodes <- npeople / ncomp
    c.dens <- n.edges / (n.nodes * (n.nodes - 1))
    # estimate initial probability of tie based on distance
    coefficient <- log(c.dens / (1 - c.dens))
    alpha <- coefficient / mean(nodes$d)
    nodes[, p := exp(alpha * d) / (1 + exp(alpha * d))]
    # adjust probability to keep density
    nodes[, p := as.numeric(min(c(1, p * (1/(mean(nodes$p) / c.dens))))), by = c("ID", "ALTERID")]
    # simulate edges
    nodes[, edge := sample(c(0, 1), 1, prob = c(1 - p, p)), by = c("ID", "ALTERID")]
    # keep the edges
    nodes <- nodes[edge == 1, list(ID, ALTERID)]
    # bind the networks
    if (q == 1) {
      net <- copy(nodes)
    } else {
      net <- rbind(net, nodes)
    }
  }
  # create opposite direction
  net <- rbind(net, net[, list(ID = ALTERID, ALTERID = ID)])
})
This incorporates @BenBolker's and @DavidArenburg's suggestions and also makes use of some of data.table's tools.
Non-Equi joins
The OP's code loops through each group. One part of the code also uses combn and multiple joins to get the data into the right format. Using non-equi joins, we can combine all of those steps into one data.table call:
dt_non_sub <- dt.people[dt.people,
                        on = .(ego < ego, group = group),
                        allow.cartesian = T,
                        nomatch = 0L,
                        .(group,
                          ALTERID = i.ego, ID = x.ego,
                          ID.UC = UC, ID.OC = OC,
                          ALTERID.OC = i.OC, ALTERID.UC = i.UC,
                          d = abs(UC - i.UC)) # added to be more efficient
                        ]
# dt_non_sub[, d := abs(ID.UC - ALTERID.UC)]
Vectorization
The original code was mostly slow because of the two calls with by groupings. Since each call split the data into around 8,000 individual groups, there were 8,000 function calls each time. This version eliminates those by using pmin as suggested by @DavidArenburg and then uses runif(N) < p as suggested by @BenBolker. My addition was that, since your final result doesn't seem to care about p, I only assigned edge, using {} to return only the last thing calculated in the call.
# alpha <- coefficient / mean(nodes$d)
dt_non_sub[,
           edge := {
             alpha = coefficient / mean(d)
             p = exp(alpha * d) / (1 + exp(alpha * d))
             p_mean = mean(p)
             p = pmin(1, p * (1/(p_mean / c.dens)))
             as.numeric(runif(.N) < p)
           }
           , by = .(group)]
net2 <- rbindlist(list(dt_non_sub[edge == 1, .(group, ALTERID, ID)],
                       dt_non_sub[edge == 1, .(group, ID = ALTERID, ALTERID = ID)]))
One thing to note is that the vectorization is not 100% identical. Your code was recursive: each split updated mean(nodes$p) for the next ID, ALTERID group. If you need that recursive part of the call, there is not much that can be done to make it faster.
In the end, the modified code runs in 20 ms vs. the 810 ms of your original function. The results, while different, are somewhat similar in the total number of results:
Original:
net
ID ALTERID
1: 5 10
2: 10 14
3: 5 25
4: 10 25
5: 14 25
---
48646: 498 458
48647: 498 477
48648: 498 486
48649: 498 487
48650: 498 493
Modified
net2
group ALTERID ID
1: 2 4 3
2: 2 6 4
3: 4 7 1
4: 4 8 7
5: 2 9 4
---
49512: 3 460 500
49513: 3 465 500
49514: 3 478 500
49515: 3 482 500
49516: 3 497 500
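For reference, the 20 ms vs. 810 ms comparison can be reproduced with microbenchmark; a sketch, where original_net() and vectorised_net() are hypothetical wrappers around the loop version and the non-equi-join version (they are not defined in this post):
library(microbenchmark)
microbenchmark(original = original_net(),
               vectorised = vectorised_net(),
               times = 10)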

R: Error in calculating the average of a variable at different time intervals for many factors using for loop

I have a data frame in which a variable (var1) is recorded over time in seconds. I want to calculate the mean of var1 for each sample over successive 10-second intervals, up to 500 seconds.
The data frame looks like this:
sample time var1
S1 1 3.5
S1 2 6.3
S1 3 7.8
S1 4 20.5
S1 … ...
S1 530 4.5
S2 1 6.7
S2 2 20.3
S2 3 5.4
S2 … ...
S2 710 70.3
...
The data frame that I want to obtain looks like this
Sample var1_mean10:20sec var1_mean20:30sec .... var1_mean490:500sec
S1
S2
..
So I wrote this code:
library(readxl) # read_excel() comes from the readxl package
setwd("…")
A <- read_excel("dati.xlsx")
for (cat in unique(A$sample)) {
  A.s <- subset(A, A$sample == cat)
  cuts <- cut(A.s$time, breaks = seq.int(from = 0, to = 500, by = 10))
  d <- by(A.s$var1, cuts, mean)
  Y <- data.frame(d)
  j <- t(Y)
  write.csv(Y, file = paste(cat, "var1", sep = "_"))
}
But when I run it I get this error message: Error in as.data.frame.default(x[[i]], optional = TRUE) : cannot coerce class "by" to a data.frame
The plan is to eventually merge all the different CSV files.
If I understood your problem correctly, you are trying to average your data over 10-second intervals. I would like to propose an alternative approach that uses the function aggregate to compute the mean across each 10-second interval. The intervals are built with a fictitious 'times' column, which is then used to group the data before averaging.
# try to create some data similar to yours
A <- data.frame(sample = c(rep('A1', 530), rep('A2', 710)),
                time = c(1:530, 1:710), var1 = runif(530 + 710))
A$times <- ceiling(A$time / 10)
Y <- aggregate(var1 ~ sample + times, data = A, FUN = mean)
Then you could export Y straightaway.
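If you need the wide layout from your example (one column per 10-second interval), the long result can be reshaped in base R; a sketch, assuming Y as created above (the output file name is just an example):
Y_wide = reshape(Y, idvar = "sample", timevar = "times", direction = "wide")
write.csv(Y_wide, file = "var1_means_by_interval.csv", row.names = FALSE)
The reshaped columns come out as var1.1, var1.2, ... for the successive 10-second bins.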
HTH
Solved :
A <- read_excel("data.xlsx")
n <- subset(A, time <= 500)
d<-data.frame(sample= n$sample, time= n$time, ms=n$var1)
storage.data<-data.frame(matrix(nrow = n, ncol = n))
for(cat in unique(d$sample)){
g <- subset(d, d$sample == cat)
cuts <- cut (g$time, breaks=seq.int(from = 0, to = 500, by = 10))
p <- by (g$ms, cuts, mean)
storage.data[cat] = p}
View(storage.data)
storage.data_t <- t(storage.data)
View(storage.data_t)
write.csv(storage.data_t, file = "filename.csv")

Return specific subset results of boolean data using r

I have a matrix of data, and I want to return the summary/mean of specific columns, after a boolean command has been issued.
I tried:
by(data$total > 500, data$operation1 == 1 & data$operation2 == 1, summary)
However, it just returns the number of results that are TRUE or FALSE, not the actual mean/summary of the totals > 500.
I then tried:
summary(subset(data, data$total > 500 & data$operation1 == 1 & data$operation2 == 1))
That did work; however, it returned the summary of every column in the subset, not just the columns I care about for totals > 500.
I have a feeling the correct answer is a mix of the subset() and by() commands, but I'm coming up with a blank.
Thanks for your input.
This is a possibility:
# build small dataset
factor1 <- factor(rep(1:2,each=25))
factor2 <- factor(rep(3:4,each=25))
data<-rnorm(50,500,50)
alt.data<-rnorm(50,500,50)
frame <- data.frame(factor1,factor2,data,alt.data)
# subset the dataframe
subset(frame, data>500 & factor1==1 & factor2==3)
# summarize the one variable
summary(subset(frame, data>500 & factor1==1 & factor2==3)[,3])
# or if you want multiple columns
summary(subset(frame, data>500 & factor1==1 & factor2==3)[,3:4])
If I am interpreting your question correctly.
Try this
data <- data.frame(total = sample(seq(490,510), 10),
operation1 = sample(seq(1,2), 10, replace = T),
operation2 = sample(seq(1,2), 10, replace = T),
ColumnToSum1 = rnorm(10, 2, 6),
ColumnToSum2 = rnorm(10, 2, 6)) # Your data
summary(data[data$total > 500 & data$operation1 == 1 & data$operation2 == 1, c("ColumnToSum1", "ColumnToSum2")])
colMeans(data[data$total > 500 & data$operation1 == 1 & data$operation2 == 1, c("ColumnToSum1", "ColumnToSum2")], na.rm = T)
Example results:
ColumnToSum1 ColumnToSum2
Min. :-0.99907 Min. : 6.973
1st Qu.:-0.08076 1st Qu.: 9.001
Median : 0.83755 Median :11.028
Mean : 0.83755 Mean :11.028
3rd Qu.: 1.75586 3rd Qu.:13.055
Max. : 2.67416 Max. :15.082
ColumnToSum1 ColumnToSum2
0.8375483 11.0277917
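If you prefer dplyr (used elsewhere in these threads), the same filter-then-summarise can be written as a pipeline; a sketch against the example data frame above:
library(dplyr)
data %>%
  filter(total > 500, operation1 == 1, operation2 == 1) %>%
  summarise(ColumnToSum1 = mean(ColumnToSum1, na.rm = TRUE),
            ColumnToSum2 = mean(ColumnToSum2, na.rm = TRUE))
This gives the same column means as the colMeans() call, just in pipeline form.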

How to calculate the median on a grouped dataset?

My dataset is as follows:
salary number
1500-1600 110
1600-1700 180
1700-1800 320
1800-1900 460
1900-2000 850
2000-2100 250
2100-2200 130
2200-2300 70
2300-2400 20
2400-2500 10
How can I calculate the median of this dataset? Here's what I have tried:
x <- c(110, 180, 320, 460, 850, 250, 130, 70, 20, 10)
colnames <- "numbers"
rownames <- c("[1500-1600]", "(1600-1700]", "(1700-1800]", "(1800-1900]",
"(1900-2000]", "(2000,2100]", "(2100-2200]", "(2200-2300]",
"(2300-2400]", "(2400-2500]")
y <- matrix(x, nrow=length(x), dimnames=list(rownames, colnames))
data.frame(y, "cumsum"=cumsum(y))
numbers cumsum
[1500-1600] 110 110
(1600-1700] 180 290
(1700-1800] 320 610
(1800-1900] 460 1070
(1900-2000] 850 1920
(2000,2100] 250 2170
(2100-2200] 130 2300
(2200-2300] 70 2370
(2300-2400] 20 2390
(2400-2500] 10 2400
Here, you can see the half-way frequency is 2400/2=1200. It is between 1070 and 1920. Thus the median class is the (1900-2000] group. You can use the formula below to get this result:
Median = L + h/f (n/2 - c)
where:
L is the lower class boundary of median class
h is the size of the median class i.e. difference between upper and lower class boundaries of median class
f is the frequency of median class
c is previous cumulative frequency of the median class
n/2 is total no. of observations divided by 2 (i.e. sum f / 2)
Alternatively, median class is defined by the following method:
Locate n/2 in the column of cumulative frequency.
Get the class in which this lies.
And in code:
> 1900 + (1200 - 1070) / (1920 - 1070) * (2000 - 1900)
[1] 1915.294
Now what I want to do is to make the above expression more elegant - i.e. 1900+(1200-1070)/(1920-1070)*(2000-1900). How can I achieve this?
Since you already know the formula, it should be easy enough to create a function to do the calculation for you.
Here, I've created a basic function to get you started. The function takes four arguments:
frequencies: A vector of frequencies ("number" in your first example)
intervals: A 2-row matrix with the same number of columns as the length of frequencies, with the first row being the lower class boundary, and the second row being the upper class boundary. Alternatively, "intervals" may be a column in your data.frame, and you may specify sep (and possibly, trim) to have the function automatically create the required matrix for you.
sep: The separator character in your "intervals" column in your data.frame.
trim: A regular expression of characters that need to be removed before trying to coerce to a numeric matrix. One pattern is built into the function: trim = "cut". This sets the regular expression pattern to remove (, ), [, and ] from the input.
Here's the function (with comments showing how I used your instructions to put it together):
GroupedMedian <- function(frequencies, intervals, sep = NULL, trim = NULL) {
  # If "sep" is specified, the function will try to create the
  # required "intervals" matrix. "trim" removes any unwanted
  # characters before attempting to convert the ranges to numeric.
  if (!is.null(sep)) {
    if (is.null(trim)) pattern <- ""
    else if (trim == "cut") pattern <- "\\[|\\]|\\(|\\)"
    else pattern <- trim
    intervals <- sapply(strsplit(gsub(pattern, "", intervals), sep), as.numeric)
  }
  Midpoints <- rowMeans(intervals)
  cf <- cumsum(frequencies)
  Midrow <- findInterval(max(cf)/2, cf) + 1
  L <- intervals[1, Midrow]      # lower class boundary of median class
  h <- diff(intervals[, Midrow]) # size of median class
  f <- frequencies[Midrow]      # frequency of median class
  cf2 <- cf[Midrow - 1]          # cumulative frequency of the class before the median class
  n_2 <- max(cf)/2               # total observations divided by 2
  unname(L + (n_2 - cf2)/f * h)
}
Here's a sample data.frame to work with:
mydf <- structure(list(salary = c("1500-1600", "1600-1700", "1700-1800",
"1800-1900", "1900-2000", "2000-2100", "2100-2200", "2200-2300",
"2300-2400", "2400-2500"), number = c(110L, 180L, 320L, 460L,
850L, 250L, 130L, 70L, 20L, 10L)), .Names = c("salary", "number"),
class = "data.frame", row.names = c(NA, -10L))
mydf
# salary number
# 1 1500-1600 110
# 2 1600-1700 180
# 3 1700-1800 320
# 4 1800-1900 460
# 5 1900-2000 850
# 6 2000-2100 250
# 7 2100-2200 130
# 8 2200-2300 70
# 9 2300-2400 20
# 10 2400-2500 10
Now, we can simply do:
GroupedMedian(mydf$number, mydf$salary, sep = "-")
# [1] 1915.294
Here's an example of the function in action on some made up data:
set.seed(1)
x <- sample(100, 100, replace = TRUE)
y <- data.frame(table(cut(x, 10)))
y
# Var1 Freq
# 1 (1.9,11.7] 8
# 2 (11.7,21.5] 8
# 3 (21.5,31.4] 8
# 4 (31.4,41.2] 15
# 5 (41.2,51] 13
# 6 (51,60.8] 5
# 7 (60.8,70.6] 11
# 8 (70.6,80.5] 15
# 9 (80.5,90.3] 11
# 10 (90.3,100] 6
### Here's GroupedMedian's output on the grouped data.frame...
GroupedMedian(y$Freq, y$Var1, sep = ",", trim = "cut")
# [1] 49.49231
### ... and the output of median on the original vector
median(x)
# [1] 49.5
By the way, with the sample data that you provided, I think there was a mistake in one of your ranges (all were separated by dashes except one, which was separated by a comma). Since strsplit splits on a regular expression by default, you can handle both separators like this:
x<-c(110,180,320,460,850,250,130,70,20,10)
colnames<-c("numbers")
rownames<-c("[1500-1600]","(1600-1700]","(1700-1800]","(1800-1900]",
"(1900-2000]"," (2000,2100]","(2100-2200]","(2200-2300]",
"(2300-2400]","(2400-2500]")
y<-matrix(x,nrow=length(x),dimnames=list(rownames,colnames))
GroupedMedian(y[, "numbers"], rownames(y), sep="-|,", trim="cut")
# [1] 1915.294
I've written it like this to clearly explain how it's being worked out. A more compact version is appended.
library(data.table)
#constructing the dataset with the salary range split into low and high
salarydata <- data.table(
salaries_low = 100*c(15:24),
salaries_high = 100*c(16:25),
numbers = c(110,180,320,460,850,250,130,70,20,10)
)
#calculating cumulative number of observations
salarydata <- salarydata[,cumnumbers := cumsum(numbers)]
salarydata
# salaries_low salaries_high numbers cumnumbers
# 1: 1500 1600 110 110
# 2: 1600 1700 180 290
# 3: 1700 1800 320 610
# 4: 1800 1900 460 1070
# 5: 1900 2000 850 1920
# 6: 2000 2100 250 2170
# 7: 2100 2200 130 2300
# 8: 2200 2300 70 2370
# 9: 2300 2400 20 2390
# 10: 2400 2500 10 2400
#identifying median group
mediangroup <- salarydata[
(cumnumbers - numbers) <= (max(cumnumbers)/2) &
cumnumbers >= (max(cumnumbers)/2)]
mediangroup
# salaries_low salaries_high numbers cumnumbers
# 1: 1900 2000 850 1920
#creating the variables needed to calculate median
mediangroup[,l := salaries_low]
mediangroup[,h := salaries_high - salaries_low]
mediangroup[,f := numbers]
mediangroup[,c := cumnumbers- numbers]
n = salarydata[,sum(numbers)]
#calculating median
median <- mediangroup[,l + ((h/f)*((n/2)-c))]
median
# [1] 1915.294
The compact version -
EDIT: Changed to a function at @AnandaMahto's suggestion. Also, using more general variable names.
library(data.table)
# Creating function
CalculateMedian <- function(LowerBound,
                            UpperBound,
                            Obs) {
  # calculating cumulative number of observations and n
  dataset <- data.table(UpperBound, LowerBound, Obs)
  dataset <- dataset[, cumObs := cumsum(Obs)]
  n = dataset[, max(cumObs)]
  # identifying median group and dynamically calculating l, h, f, c. We already have n.
  median <- dataset[
    (cumObs - Obs) <= (max(cumObs)/2) &
      cumObs >= (max(cumObs)/2),
    LowerBound + ((UpperBound - LowerBound)/Obs) * ((n/2) - (cumObs - Obs))
  ]
  return(median)
}
# Using function
CalculateMedian(
LowerBound = 100*c(15:24),
UpperBound = 100*c(16:25),
Obs = c(110,180,320,460,850,250,130,70,20,10)
)
# [1] 1915.294
(Sal <- sapply( strsplit(as.character(dat[[1]]), "-"),
function(x) mean( as.numeric(x) ) ) )
[1] 1550 1650 1750 1850 1950 2050 2150 2250 2350 2450
require(Hmisc)
wtd.mean(Sal, weights = dat[[2]])
[1] 1898.75
wtd.quantile(Sal, weights=dat[[2]], probs=0.5)
Generalization to a weighted median might require looking for a package that has such.
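A weighted median of the interval midpoints can also be put together in a few lines of base R; a minimal sketch, with Sal and the counts in dat[[2]] as above (wtd_median is just an ad-hoc helper name):
wtd_median <- function(x, w) {
  ord <- order(x)
  x <- x[ord]; w <- w[ord]
  x[which(cumsum(w) >= sum(w)/2)[1]] # first value where cumulative weight reaches half the total
}
wtd_median(Sal, dat[[2]])
# [1] 1950
This returns the midpoint of the class where the cumulative frequency first reaches n/2 (1950 here), rather than the interpolated 1915.294 given by the grouped-median formula above.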
Have you tried median or apply(yourobject,2,median) if it is a matrix or data.frame ?
What about this way? Create vectors for each salary bracket, assuming an even spread over each band. Then make one big vector from those vectors and take the median. Similar to yours, but with a slightly different result. I'm not a mathematician, so the method could be incorrect.
dat <- matrix(c(seq(1500, 2400, 100), seq(1600, 2500, 100), c(110, 180, 320, 460, 850, 250, 130, 70, 20, 10)), ncol=3)
median(unlist(apply(dat, 1, function(x) { ((1:x[3])/x[3])*(x[2]-x[1])+x[1] })))
Returns 1915.353
I think this concept should work for you.
$salaries = array(
    array("1500", "1600"),
    array("1600", "1700"),
    array("1700", "1800"),
    array("1800", "1900"),
    array("1900", "2000"),
    array("2000", "2100"),
    array("2100", "2200"),
    array("2200", "2300"),
    array("2300", "2400"),
    array("2400", "2500"),
);
$numbers = array("110","180","320","460","850","250","130","70","20","10");
$cumsum = array();
$n = 0;
$count = 0;
foreach ($numbers as $key => $number) {
    $cumsum[$key] = $number;
    $n += $number;
    if ($count > 0) {
        $cumsum[$key] += $cumsum[$key - 1];
    }
    ++$count;
}
$classIndex = 0;
foreach ($cumsum as $key => $cum) {
    if ($cum < ($n / 2)) {
        $classIndex = $key + 1;
    }
}
$classRange = $salaries[$classIndex];
$L = $classRange[0];
$h = (float) $classRange[1] - $classRange[0];
$f = $numbers[$classIndex];
$c = $cumsum[$classIndex - 1]; // cumulative frequency of the class before the median class
$Median = $L + ($h / $f) * (($n / 2) - $c);
echo $Median;
