How to plot the recursive partitioning from the rpart package

I want to plot a partition of a two-dimensional covariate space constructed by recursive binary splitting. To be more precise, I would like to write a function that replicates the following graph (taken from The Elements of Statistical Learning, p. 306):
Displayed above is a two-dimensional covariate space and a partition obtained by recursively binary-splitting the space with axis-aligned splits (as in the CART algorithm). What I want to implement is a function that takes the output of the rpart function and generates such a plot.
Here is some example code:
## Generating data.
set.seed(1975)
n <- 5000
p <- 2
X <- matrix(sample(seq(0, 1, by = 0.01), n * p, replace = TRUE), ncol = p)
Y <- X[, 1] + 2 * X[, 2] + rnorm(n)

## Building tree.
library(rpart)
tree <- rpart(Y ~ ., data = data.frame(Y, X), method = "anova",
              control = rpart.control(cp = 0, maxdepth = 2))
Browsing SO, I found this function:
rpart_splits <- function(fit, digits = getOption("digits")) {
  splits <- fit$splits
  if (!is.null(splits)) {
    ff <- fit$frame
    is.leaf <- ff$var == "<leaf>"
    n <- nrow(splits)
    nn <- ff$ncompete + ff$nsurrogate + !is.leaf
    ix <- cumsum(c(1L, nn))
    ix_prim <- unlist(mapply(ix, ix + c(ff$ncompete, 0), FUN = seq, SIMPLIFY = FALSE))
    type <- rep.int("surrogate", n)
    type[ix_prim[ix_prim <= n]] <- "primary"
    type[ix[ix <= n]] <- "main"
    left <- character(nrow(splits))
    side <- splits[, 2L]
    for (i in seq_along(left)) {
      left[i] <- if (side[i] == -1L)
        paste("<", format(signif(splits[i, 4L], digits)))
      else if (side[i] == 1L)
        paste(">=", format(signif(splits[i, 4L], digits)))
      else {
        catside <- fit$csplit[splits[i, 4L], 1:side[i]]
        paste(c("L", "-", "R")[catside], collapse = "")
      }
    }
    cbind(data.frame(var = rownames(splits),
                     type = type,
                     node = rep(as.integer(row.names(ff)), times = nn),
                     ix = rep(seq_len(nrow(ff)), nn),
                     left = left),
          as.data.frame(splits, row.names = FALSE))
  }
}
Using this function, I am able to recover all the splitting variables and points:
splits <- rpart_splits(tree)[rpart_splits(tree)$type == "main", ]
splits
# var type node ix left count ncat improve index adj
# 1 X2 main 1 1 < 0.565 5000 -1 0.18110662 0.565 0
# 3 X2 main 2 2 < 0.265 2814 -1 0.06358597 0.265 0
# 6 X1 main 3 5 < 0.645 2186 -1 0.07645851 0.645 0
The column var tells me the splitting variable for each non-terminal node, and the column left gives the associated split point. However, I do not know how to use this information to produce my desired plot.
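For this particular tree I can even hard-code the three main splits with base graphics (a sketch of the desired output using the values above, not the general function I am after):
## Hard-coded sketch of the partition implied by the three "main" splits above.
plot(NULL, xlim = c(0, 1), ylim = c(0, 1), xlab = "X2", ylab = "X1")
abline(v = 0.565)                                     # root split: X2 < 0.565
segments(x0 = 0.265, y0 = 0, x1 = 0.265, y1 = 1)      # node 2: X2 < 0.265
segments(x0 = 0.565, y0 = 0.645, x1 = 1, y1 = 0.645)  # node 3: X1 < 0.645
What I am missing is how to generate this programmatically for an arbitrary tree.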
Of course, if you have an alternative strategy that does not involve the use of rpart_splits, feel free to suggest it.

You could use the (as yet unpublished) parttree package, which you can install from GitHub via:
remotes::install_github("grantmcdermott/parttree")
This allows you to do:
library(parttree)
library(ggplot2)

ggplot() +
  geom_parttree(data = tree, aes(fill = path)) +
  coord_cartesian(xlim = c(0, 1), ylim = c(0, 1)) +
  scale_fill_brewer(palette = "Pastel1", name = "Partitions") +
  theme_bw(base_size = 16) +
  labs(x = "X2", y = "X1")
Incidentally, this package also contains the function parttree, which returns something very similar to your
rpart_splits function:
parttree(tree)
node Y path xmin xmax ymin ymax
1 4 0.7556079 X2 < 0.565 --> X2 < 0.265 -Inf 0.265 -Inf Inf
2 5 1.3087679 X2 < 0.565 --> X2 >= 0.265 0.265 0.565 -Inf Inf
3 6 1.8681143 X2 >= 0.565 --> X1 < 0.645 0.565 Inf -Inf 0.645
4 7 2.4993361 X2 >= 0.565 --> X1 >= 0.645 0.565 Inf 0.645 Inf

Related

How can I find the maximum output of a function

If I have a GLM, is there any way I can efficiently find the maximum output by varying one covariate while holding the others constant?
Using my simulated data:
# FUNCTIONS ====================================================================
library(dplyr)
library(ggplot2)

logit <- function(p) {
  log(p / (1 - p))
}
sigmoid <- function(x) {
  1 / (1 + exp(-x))
}
beta_duration <- function(D, select) {
  logit(
    switch(select,
           0.05 + 0.9 / (1 + exp(-2 * D + 25)),
           0.9 * exp(-exp(-0.5 * (D - 11))),
           0.9 * exp(-exp(-(D - 11))),
           0.9 * exp(-2 * exp(-(D - 9))),
           sigmoid(0.847 + 0.210 * (D - 10)),
           0.7 + 0.0015 * (D - 10)^2,
           0.7 - 0.0015 * (D - 10)^2 + 0.03 * (D - 10)
    )
  )
}
beta_sex <- function(sex, OR = 1) {
  ifelse(sex == "Female", -0.5 * log(OR), 0.5 * log(OR))
}
plot_beta_duration <- function(select) {
  x <- seq(10, 20, by = 0.01)
  y <- beta_duration(x, select)
  data.frame(x = x, y = y) %>%
    ggplot(aes(x = x, y = y)) +
    geom_line() +
    ylim(0, 1)
}
# DATA SIMULATION ==============================================================
duration <- c(10, 12, 14, 18, 20)
sex <- factor(c("Female", "Male"))

eta <- function(duration, sex, duration_select, sex_OR, noise_sd) {
  beta_sex(sex, sex_OR) + beta_duration(duration, duration_select) +
    rnorm(length(duration), 0, noise_sd)
}
sim_data <- function(durations_type, sex_OR, noise_sd, p_female, n, seed) {
  set.seed(seed)
  data.frame(
    duration = sample(duration, n, TRUE),
    sex = sample(sex, n, TRUE, c(p_female, 1 - p_female))
  ) %>%
    rowwise() %>%
    mutate(eta = eta(duration, sex, durations_type, sex_OR, noise_sd),
           p = sigmoid(eta),
           cured = sample(0:1, 1, prob = c(1 - p, p)))
}
# DATA SIM PARAMETERS
durations_type <- 4  # See beta_duration for functions
sex_OR <- 3          # Odds of cure for male vs female (ref)
noise_sd <- 1
p_female <- 0.7      # Proportion of females in the sample
n <- 500

data <- sim_data(durations_type = 1,  # See beta_duration for functions
                 sex_OR = 3,          # Odds of cure for male vs female (ref)
                 noise_sd = 1,
                 p_female = 0.7,      # Proportion of females in the sample
                 n = 500,
                 seed = 21874564)
I am fitting a fractional polynomial GLM:
library(mfp)
model1 <- mfp(cured ~ fp(duration) + sex,
              family = binomial(link = "logit"),
              data = data)
summary(model1)
Given that I am holding sex constant, is there any way to find the value of duration within a certain range that gives me the highest predicted value? Something more efficient than:
range <- seq(10, 20, by = 1e-4)
range[which.max(predict(model1, type = "response",
                        newdata = data.frame(duration = range, sex = "Male")))]
You can use optimise here. Just create a function which returns a prediction based on the value of duration:
f <- function(x) predict(model1, list(sex = 'Male', duration = x))
And we can find the value of duration which produces the maximum log odds within the range 0-20 by doing:
optimise(f, c(0, 20), maximum = TRUE)$maximum
#> [1] 17.95679
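Since the logistic link is monotone, the duration that maximises the log odds also maximises the predicted probability. If you prefer to work on the probability scale directly, the same one-liner applies with type = "response" (as already used in your grid search):
f_prob <- function(x) predict(model1, list(sex = 'Male', duration = x), type = 'response')
optimise(f_prob, c(0, 20), maximum = TRUE)$maximum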

Find value of covariate given a probability in R

Given a fractional polynomial GLM, I am looking to find the value of a covariate that gives me an output of a given probability.
My data is simulated using:
# FUNCTIONS ====================================================================
library(dplyr)
library(ggplot2)

logit <- function(p) {
  log(p / (1 - p))
}
sigmoid <- function(x) {
  1 / (1 + exp(-x))
}
beta_duration <- function(D, select) {
  logit(
    switch(select,
           0.05 + 0.9 / (1 + exp(-2 * D + 25)),
           0.9 * exp(-exp(-0.5 * (D - 11))),
           0.9 * exp(-exp(-(D - 11))),
           0.9 * exp(-2 * exp(-(D - 9))),
           sigmoid(0.847 + 0.210 * (D - 10)),
           0.7 + 0.0015 * (D - 10)^2,
           0.7 - 0.0015 * (D - 10)^2 + 0.03 * (D - 10)
    )
  )
}
beta_sex <- function(sex, OR = 1) {
  ifelse(sex == "Female", -0.5 * log(OR), 0.5 * log(OR))
}
plot_beta_duration <- function(select) {
  x <- seq(10, 20, by = 0.01)
  y <- beta_duration(x, select)
  data.frame(x = x, y = y) %>%
    ggplot(aes(x = x, y = y)) +
    geom_line() +
    ylim(0, 1)
}
# DATA SIMULATION ==============================================================
duration <- c(10, 12, 14, 18, 20)
sex <- factor(c("Female", "Male"))

eta <- function(duration, sex, duration_select, sex_OR, noise_sd) {
  beta_sex(sex, sex_OR) + beta_duration(duration, duration_select) +
    rnorm(length(duration), 0, noise_sd)
}
sim_data <- function(durations_type, sex_OR, noise_sd, p_female, n, seed) {
  set.seed(seed)
  data.frame(
    duration = sample(duration, n, TRUE),
    sex = sample(sex, n, TRUE, c(p_female, 1 - p_female))
  ) %>%
    rowwise() %>%
    mutate(eta = eta(duration, sex, durations_type, sex_OR, noise_sd),
           p = sigmoid(eta),
           cured = sample(0:1, 1, prob = c(1 - p, p)))
}
# DATA SIM PARAMETERS
durations_type <- 4  # See beta_duration for functions
sex_OR <- 3          # Odds of cure for male vs female (ref)
noise_sd <- 1
p_female <- 0.7      # Proportion of females in the sample
n <- 500

data <- sim_data(durations_type = 1,  # See beta_duration for functions
                 sex_OR = 3,          # Odds of cure for male vs female (ref)
                 noise_sd = 1,
                 p_female = 0.7,      # Proportion of females in the sample
                 n = 500,
                 seed = 21874564)
And my model is fitted by:
library(mfp)
model1 <- mfp(cured ~ fp(duration) + sex,
              family = binomial(link = "logit"),
              data = data)
summary(model1)
For each level of sex (i.e. "Male" or "Female"), I want to find the value of duration that gives me a probability equal to some value frontier <- 0.8.
So far, I can only think of an approximation based on a grid of candidate values:
pred_duration <- seq(10, 20, by = 0.1)
newdat <- expand.grid(duration = pred_duration, sex = sex)
pred <- data.frame(newdat,
                   p = predict(model1, newdata = newdat, type = "response"))

pred[which(pred$p > 0.8), ] %>%
  group_by(sex) %>%
  summarize(min(duration))
But I am really after an exact solution.
The function uniroot finds the point at which the output of a function equals 0. If you create a function that takes duration as input, calculates the predicted probability for that duration, and subtracts the desired probability, then its output will be 0 at the desired value of duration, and uniroot will find that value for you. Wrapping this in a little helper makes it very easy to use:
find_prob <- function(p) {
  f <- function(v) {
    predict(model1, type = 'response',
            newdata = data.frame(duration = v, sex = 'Male')) - p
  }
  uniroot(f, interval = range(data$duration), tol = 1e-9)$root
}
So, for example, to find the duration that gives an 80% probability, we just do:
find_prob(0.8)
#> [1] 12.86089
To prove that this is the correct value, we can feed it directly into predict to see the predicted probability given sex = "Male" and duration = 12.86089:
predict(model1, type = 'response',
        newdata = data.frame(sex = 'Male', duration = find_prob(0.8)))
#> 1
#> 0.8
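To get the corresponding value for each level of sex, a small variation of the helper (my own extension, not part of the answer above) is to pass the level in as an argument; note that uniroot needs the target probability to actually be attained within range(data$duration) for that level:
find_prob_by_sex <- function(p, sex) {
  f <- function(v) {
    predict(model1, type = 'response',
            newdata = data.frame(duration = v, sex = sex)) - p
  }
  uniroot(f, interval = range(data$duration), tol = 1e-9)$root
}
sapply(c("Female", "Male"), find_prob_by_sex, p = 0.8)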

simulating a simple linear model

I'm trying to simulate a simple linear model 100 times and find the LS estimate of B1 (the slope) from each fit.
set.seed(123498)
x <- rnorm(z, 0, 1)
e <- rnorm(z, 0, 2)
y <- 0.5 + 2*x + e
model <- lm(y ~ x)
simulaten = 100
z = 10
for (i in 1:simulaten) {
  e <- rnorm(n, 0, 2)
  x <- rnorm(n, 0, 1)
  y <- 0.5 + 2*x + e
  model <- lm(y ~ x)
}
summary(model)
Is that what my for loop is achieving, or have I missed the mark?
Here is a replicate solution. I have set n (forgotten in the question) and simulaten to a smaller value.
n <- 100
simulaten <- 4
set.seed(123498)
model_list <- replicate(simulaten, {
  e <- rnorm(n, 0, 2)
  x <- rnorm(n, 0, 1)
  y <- 0.5 + 2*x + e
  lm(y ~ x)
}, simplify = FALSE)
model_list
Edit
Several statistics can be obtained from the list of models. The coefficients are extracted by applying the function coef to each model.
Done with sapply, the returned object is a matrix with 2 rows. (Note: the str output below comes from a run with simulaten = 1000.)
betas <- sapply(model_list, coef)
str(betas)
# num [1:2, 1:1000] 0.671 1.875 0.374 2.019 0.758 ...
# - attr(*, "dimnames")=List of 2
# ..$ : chr [1:2] "(Intercept)" "x"
# ..$ : NULL
As for the graph, here is an example. Note that xlim is set to range(betas) in the first call to hist so that the x axis covers all the values.
lgd <- c(expression(beta[0]), expression(beta[1]))
hist(betas[1, ], freq = FALSE, col = "lightblue",
     xlim = range(betas), ylim = c(0, 2.5),
     xlab = "betas", main = "")
hist(betas[2, ], freq = FALSE, col = "blue", add = TRUE)
legend("top", legend = lgd, fill = c("lightblue", "blue"), horiz = TRUE)
The model is updated in each iteration, so summary is returning the output of the last model only. We could store each fit in a list instead.
# Initialize an empty list with one element per simulation
modellst <- vector('list', simulaten)
for (i in seq_len(simulaten)) {
  e <- rnorm(n, 0, 2)
  x <- rnorm(n, 0, 1)
  y <- 0.5 + 2*x + e
  # Assign the model output to the corresponding list element
  modellst[[i]] <- lm(y ~ x)
}
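The coefficients can then be extracted from the list exactly as with the replicate() approach, for example:
betas <- sapply(modellst, coef)  # 2-row matrix: intercept and slope per fit
rowMeans(betas)                  # average estimates, which should be close to c(0.5, 2)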

Calculate/approach individual face probabilities of 10-faced dice, based on summed 2-roll dice experiment

I have a biochemistry problem that can be simplified to a two-roll dice experiment (I think...).
Assume there is an unfair die with 10 faces, i.e. the individual face probabilities are not 1/10. We want to know these probabilities.
The dataset that we are given, however, is a histogram of summed faces from rolling the (same) die twice. So the range of the observed bins is 2-20 (2 = 1+1; 3 = 1+2, 2+1; 4 = 2+2, 1+3, 3+1; etc.).
The probabilities of the summed faces are sums of products of the individual probabilities (s: observed probabilities of summed faces; p: probabilities of individual faces) and can be written as follows:
s2 ~ p1^2
s3 ~ 2*p1*p2
s4 ~ 2*p1*p3 + p2^2
s5 ~ 2*p1*p4 + 2*p2*p3
s6 ~ 2*p1*p5 + 2*p2*p4 + p3^2
s7 ~ 2*p1*p6 + 2*p2*p5 + 2*p3*p4
s8 ~ 2*p1*p7 + 2*p2*p6 + 2*p3*p5 + p4^2
s9 ~ 2*p1*p8 + 2*p2*p7 + 2*p3*p6 + 2*p4*p5
s10 ~ 2*p1*p9 + 2*p2*p8 + 2*p3*p7 + 2*p4*p6 + p5^2
s11 ~ 2*p1*p10 + 2*p2*p9 + 2*p3*p8 + 2*p4*p7 + 2*p5*p6
s12 ~ 2*p2*p10 + 2*p3*p9 + 2*p4*p8 + 2*p5*p7 + p6^2
s13 ~ 2*p3*p10 + 2*p4*p9 + 2*p5*p8 + 2*p6*p7
s14 ~ 2*p4*p10 + 2*p5*p9 + 2*p6*p8 + p7^2
s15 ~ 2*p5*p10 + 2*p6*p9 + 2*p7*p8
s16 ~ 2*p6*p10 + 2*p7*p9 + p8^2
s17 ~ 2*p7*p10 + 2*p8*p9
s18 ~ 2*p8*p10 + p9^2
s19 ~ 2*p9*p10
s20 ~ p10^2
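(These identities are just the self-convolution of the face-probability vector, so they can all be generated at once as a check; a base-R aside, with p any length-10 vector of face probabilities:)
p <- rep(1/10, 10)                       # example: a fair die
s <- convolve(p, rev(p), type = "open")  # s[k] is the probability of sum k + 1
names(s) <- paste0("s", 2:20)
round(s, 4)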
In this case there are 20 - 1 = 19 known quantities and 10 unknowns, so the system is over-determined. It is also easy to solve by hand using algebra. As far as I can remember, the quadratic terms result in 2 possible solutions per individual face; since probabilities are always positive, in practice there should be one solution. Right?
Is there a way to solve this system in R? I am familiar with linear inverse problems in R, but I don't know how to approach this quadratic system.
Here is some code to simulate the problem:
options(stringsAsFactors = FALSE)
library(gtools)
library(dplyr)

dice <- data.frame(face = 1:10)

### functions
split_dice_faces <- function(summed_face) {
  face_face <- strsplit(x = as.character(summed_face), split = "[/_\\|]")[[1]]
  names(face_face) <- c("face1", "face2")
  as.numeric(face_face)
}
sum_dice_faces <- function(face_face) {
  sapply(face_face, function(face_face_i) {
    face1 <- split_dice_faces(face_face_i)[1]
    face2 <- split_dice_faces(face_face_i)[2]
    sum(c(face1[1], face2[1]))
  })
}
simulate_2_rolls <- function(dice_pool) {
  dice_perm <- data.frame(permutations(n = dim(dice_pool)[1], r = 2,
                                       v = as.character(dice_pool$face),
                                       repeats.allowed = TRUE))
  dice_perm$face_face <- paste(dice_perm[[1]], "|", dice_perm[[2]], sep = "")
  dice_perm$prob <- dice_pool$prob[match(dice_perm[[1]], dice_pool$face)] *
    dice_pool$prob[match(dice_perm[[2]], dice_pool$face)]
  dice_perm$summed_face <- sum_dice_faces(dice_perm$face_face)
  dice_perm <- dice_perm %>%
    arrange(summed_face) %>%
    select(one_of(c("face_face", "summed_face", "prob")))
  dice_perm
}
summarise_2_rolls_experiment <- function(simulate_2_rolls_df) {
  simulate_2_rolls_df %>% group_by(summed_face) %>% summarise(prob = sum(prob))
}
from_face_probs_to_summed_observations <- function(face_probs) {
  face_probs %>%
    data.frame(face = dice$face, prob = .) %>%
    simulate_2_rolls() %>%
    summarise_2_rolls_experiment() %>%
    pull(prob)
}
generate_formulas <- function() {
  output <- dice_sum_probs %>%
    group_by(summed_face) %>%
    group_split() %>%
    sapply(function(i) {
      left_hand <- paste("s", i$summed_face[1], sep = "")
      right_hand <- sapply(strsplit(i$face_face, "\\|"), function(row) {
        row_i <- as.numeric(row)
        row_i <- row_i[order(row_i)]
        row_i <- paste("p", row_i, sep = "")
        if (row_i[1] == row_i[2]) {
          paste(row_i[1], "^2", sep = "")
        } else {
          paste(row_i, collapse = "*")
        }
      })
      right_hand <- paste(sapply(unique(right_hand), function(right_hand_i) {
        fact <- sum(right_hand == right_hand_i)
        if (fact > 1) { fact <- paste(fact, "*", sep = "") } else { fact <- "" }
        paste(fact, right_hand_i, sep = "")
      }), collapse = " + ")
      paste(left_hand, "~", right_hand)
    })
  return(output)
}
To simulate a dataset:
### random individual probabilities
dice_probs <- data.frame(face = dice$face,
                         prob = runif(n = dim(dice)[1]) %>% (function(x) { x / sum(x) }))
dice_probs

### simulate an infinite number of trials; observations expressed as probabilities
dice_sum_probs <- simulate_2_rolls(dice_probs)
dice_sum_probs

### sum experiment outcomes with the same sum
dice_sum_probs_summary <- dice_sum_probs %>%
  group_by(summed_face) %>%
  summarise(prob = sum(prob))

### plot; this is the given dataset
with(data = dice_sum_probs_summary, barplot(prob, names.arg = summed_face))

### how to calculate / approach p1, p2, ..., p10?
Thanks!
If we create a multiplication table of the probabilities, outer(p, p), and then sum its entries over constant values of outer(1:10, 1:10, `+`) using tapply, we get the following nonlinear regression problem:
nls(prob ~ tapply(outer(p, p), outer(1:10, 1:10, `+`), sum),
    dice_sum_probs_summary, algorithm = "port",
    start = list(p = sqrt(dice_sum_probs_summary$prob[seq(1, 19, 2)])),
    lower = numeric(10), upper = rep(1, 10))
giving:
Nonlinear regression model
model: prob ~ tapply(outer(p, p), outer(1:10, 1:10, `+`), sum)
data: dice_sum_probs_summary
p1 p2 p3 p4 p5 p6 p7 p8 p9 p10
0.06514 0.04980 0.14439 0.06971 0.06234 0.19320 0.09491 0.01237 0.11936 0.18878
residual sum-of-squares: 1.33e-30
which is consistent with
> dice_probs
face prob
1 1 0.06513537
2 2 0.04980455
3 3 0.14438749
4 4 0.06971313
5 5 0.06234477
6 6 0.19319613
7 7 0.09491289
8 8 0.01236557
9 9 0.11936244
10 10 0.18877766
We can alternatively express it as follows, where X is a 19 x 100 matrix of zeros and ones: each row corresponds to a possible outcome of rolling the two dice (i.e. 2:20) and each column to a pair of indexes from 1:10 and 1:10. An entry equals one if the column's pair sums to the row's outcome and zero otherwise.
g <- c(outer(1:10, 1:10, `+`))
X <- +outer(2:20, g, `==`)
nls(prob ~ X %*% kronecker(p, p), dice_sum_probs_summary, alg = "port",
    start = list(p = sqrt(dice_sum_probs_summary$prob[seq(1, 19, 2)])),
    lower = numeric(10), upper = rep(1, 10))
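As a sanity check (my addition, not part of the original answer), the recovered probabilities should reproduce the observed distribution of sums up to numerical tolerance:
p_hat <- dice_probs$prob  # or the fitted coefficients from either nls() call above
all.equal(c(X %*% kronecker(p_hat, p_hat)), dice_sum_probs_summary$prob)
## should be TRUE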

Non-linear fitting with nls() is giving me singular gradient matrix at initial parameter estimates. Why?

This is my first attempt at fitting a non-linear model in R, so please bear with me.
Problem
I am trying to understand why nls() is giving me this error:
Error in nlsModel(formula, mf, start, wts): singular gradient matrix at initial parameter estimates
Hypotheses
From what I've read in other questions here on SO, it could be because:
my model is discontinuous, or
my model is over-determined, or
my starting parameter values are badly chosen.
So I am calling for help on how to overcome this error. Can I change the model and still use nls(), or do I need to use nls.lm from the minpack.lm package, as I have read elsewhere?
My approach
Here are some details about the model:
the model is a discontinuous function, a kind of staircase type of function (see plot below)
in general, the number of steps in the model can vary, yet it is fixed for any specific fitting event
MWE that shows the problem
Brief explanation of the MWE code
step_fn(x, min = 0, max = 1): a function that returns 1 within the interval (min, max] and 0 otherwise; sorry about the name, I realize now it is not really a step function... interval_fn() would be more appropriate, I guess.
staircase(x, dx, dy): a summation of step_fn() functions. dx is a vector of widths for the steps, i.e. max - min, and dy is the increment in y for each step.
staircase_formula(n = 1L): generates a formula object representing the model implemented by the function staircase() (to be used with the nls() function).
Please note that I use the purrr and glue packages in the example below.
Code
step_fn <- function(x, min = 0, max = 1) {
  y <- x
  y[x > min & x <= max] <- 1
  y[x <= min] <- 0
  y[x > max] <- 0
  return(y)
}
staircase <- function(x, dx, dy) {
  max <- cumsum(dx)
  min <- c(0, max[1:(length(dx) - 1)])
  step <- cumsum(dy)
  purrr::reduce(
    purrr::pmap(list(min, max, step),
                ~ ..3 * step_fn(x, min = ..1, max = ..2)),
    `+`
  )
}
staircase_formula <- function(n = 1L) {
  i <- seq_len(n)
  dx <- sprintf("dx%d", i)
  min <- c('0', purrr::accumulate(dx[-n], .f = ~ paste(.x, .y, sep = " + ")))
  max <- purrr::accumulate(dx, .f = ~ paste(.x, .y, sep = " + "))
  lhs <- "y"
  rhs <- paste(glue::glue('dy{i} * step_fn(x, min = {min}, max = {max})'),
               collapse = " + ")
  sc_form <- as.formula(glue::glue("{lhs} ~ {rhs}"))
  return(sc_form)
}
x <- seq(0, 10, by = 0.01)
y <- staircase(x, c(1, 2, 2, 5), c(2, 5, 2, 1)) + rnorm(length(x), mean = 0, sd = 0.2)
plot(x = x, y = y)
lines(x = x, y = staircase(x, dx = c(1, 2, 2, 5), dy = c(2, 5, 2, 1)), col = "red")

my_data <- data.frame(x = x, y = y)
my_model <- staircase_formula(4)
params <- list(dx1 = 1, dx2 = 2, dx3 = 2, dx4 = 5,
               dy1 = 2, dy2 = 5, dy3 = 2, dy4 = 1)
m <- nls(formula = my_model, start = params, data = my_data)
#> Error in nlsModel(formula, mf, start, wts): singular gradient matrix at initial parameter estimates
Any help is greatly appreciated.
I assume you are given a vector of observations of length len like the ones plotted in your example, and you wish to identify k jumps and k jump sizes. (Or maybe I misunderstood you; you have not really said what you want to achieve.)
Below I will sketch a solution using Local Search. I start with your example data:
x <- seq(0, 10, by = 0.01)
y <- staircase(x,
               c(1, 2, 2, 5),
               c(2, 5, 2, 1)) + rnorm(length(x), mean = 0, sd = 0.2)
A solution is a list of positions and sizes of the jumps. Note that I use vectors to store these data, as it will become cumbersome to define variables when you have 20 jumps, say.
An example (random) solution:
k <- 5  ## number of jumps
len <- length(x)
sol <- list(position = sample(len, size = k),
            size = runif(k))
## $position
## [1] 89 236 859 885 730
##
## $size
## [1] 0.2377453 0.2108495 0.3404345 0.4626004 0.6944078
We need an objective function to compute the quality of the solution. I also define a simple helper function stairs, which is used by the objective function.
The objective function abs_diff computes the average absolute difference between the fitted series (as defined by the solution) and y.
stairs <- function(len, position, size) {
  ans <- numeric(len)
  ans[position] <- size
  cumsum(ans)
}

abs_diff <- function(sol, y, stairs, ...) {
  yy <- stairs(length(y), sol$position, sol$size)
  sum(abs(y - yy)) / length(y)
}
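Evaluating the random solution from above shows its (typically poor) starting quality; the exact value depends on the random draws:
abs_diff(sol, y, stairs)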
Now comes the key component for a Local Search: the neighbourhood function that is used to evolve the solution. The neighbourhood function takes a solution and changes it slightly. Here, it will either pick a position or a size and modify it slightly.
neighbour <- function(sol, len, ...) {
  p <- sol$position
  s <- sol$size
  if (runif(1) > 0.5) {
    ## either move one of the positions ...
    i <- sample.int(length(p), size = 1)
    p[i] <- p[i] + sample(-25:25, size = 1)
    p[i] <- min(max(1, p[i]), len)
  } else {
    ## ... or change a jump size
    i <- sample.int(length(s), size = 1)
    s[i] <- s[i] + runif(1, min = -s[i], max = 1)
  }
  list(position = p, size = s)
}
An example call: here the new solution has its first jump size changed.
## > sol
## $position
## [1] 89 236 859 885 730
##
## $size
## [1] 0.2377453 0.2108495 0.3404345 0.4626004 0.6944078
##
## > neighbour(sol, len)
## $position
## [1] 89 236 859 885 730
##
## $size
## [1] 0.2127044 0.2108495 0.3404345 0.4626004 0.6944078
It remains to run the Local Search.
library("NMOF")
sol.ls <- LSopt(abs_diff,
                list(x0 = sol, nI = 50000, neighbour = neighbour),
                stairs = stairs,
                len = len,
                y = y)
We can plot the solution: the fitted line is shown in blue.
plot(x, y)
lines(x, stairs(len, sol.ls$xbest$position, sol.ls$xbest$size),
      col = "blue", type = "S")
Try Differential Evolution (DE, also from the NMOF package) instead:
library(NMOF)

yf <- function(params, x) {
  dx1 <- params[1]; dx2 <- params[2]; dx3 <- params[3]; dx4 <- params[4]
  dy1 <- params[5]; dy2 <- params[6]; dy3 <- params[7]; dy4 <- params[8]
  dy1 * step_fn(x, min = 0, max = dx1) +
    dy2 * step_fn(x, min = dx1, max = dx1 + dx2) +
    dy3 * step_fn(x, min = dx1 + dx2, max = dx1 + dx2 + dx3) +
    dy4 * step_fn(x, min = dx1 + dx2 + dx3, max = dx1 + dx2 + dx3 + dx4)
}

algo1 <- list(printBar = FALSE,
              nP = 200L,
              nG = 1000L,
              F = 0.50,
              CR = 0.99,
              min = c(0, 1, 1, 4, 1, 4, 1, 0),
              max = c(2, 3, 3, 6, 3, 6, 3, 2))

OF2 <- function(Param, data) {
  x <- data$x
  y <- data$y
  ye <- data$model(Param, x)
  aux <- sum((y - ye)^2)
  if (is.na(aux)) aux <- 1e10
  aux
}
data5 <- list(x = x, y = y, model = yf, ww = 1)
system.time(sol5 <- DEopt(OF = OF2, algo = algo1, data = data5))
sol5$xbest
OF2(sol5$xbest, data5)
plot(x, y)
lines(data5$x, data5$model(sol5$xbest, data5$x), col = 7, lwd = 2)
#> sol5$xbest
#> [1]  1.106396 12.719182 -9.574088 18.017527  3.366852  8.721374 -19.879474  1.090023
#> OF2(sol5$xbest, data5)
#> [1] 1000.424
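Note that the reported xbest lies outside the min/max box: by default, DEopt uses those vectors only to sample the initial population. If I read the NMOF documentation correctly, they can also be enforced as constraints throughout the run (treat this flag as an assumption and check ?DEopt):
algo1$minmaxConstr <- TRUE  # assumed NMOF option; see ?DEopt
sol5b <- DEopt(OF = OF2, algo = algo1, data = data5)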
