Area Under the Curve using Simpson's rule in R

I would like to compute the Area Under the Curve defined by a set of experimental values. I created a function to calculate an approximation of the AUC using Simpson's rule, as I saw in this post. However, the function only works when it receives a vector of odd length. How can I modify the code to add the area of the last trapezoid when the input vector has an even length?
AUC <- function(x, h=1){
  # AUC computes the Area Under the Curve of a time series using
  # Simpson's rule (numerical method).
  # https://link.springer.com/chapter/10.1007/978-1-4612-4974-0_26
  # Arguments
  #   x: (vector) time series values
  #   h: (numeric) temporal resolution of the time series, default h=1
  n = length(x)-1
  xValues = seq(from=1, to=n, by=2)
  sum <- list()
  for(i in 1:length(xValues)){
    n_sub <- xValues[[i]]-1
    n     <- xValues[[i]]
    n_add <- xValues[[i]]+1
    v1 <- x[[n_sub+1]]
    v2 <- x[[n+1]]
    v3 <- x[[n_add+1]]
    s <- (h/3)*(v1 + 4*v2 + v3)
    sum <- append(sum, s)
  }
  sum <- unlist(sum)
  auc <- sum(sum)
  return(auc)
}
Here is a data example:
smoothed = c(0.3,0.317,0.379,0.452,0.519,0.573,0.61,0.629,0.628,0.613,0.587,0.556,0.521,
0.485,0.448,0.411,0.363,0.317,0.273,0.227,0.185,0.148,0.12,0.103,0.093,0.086,
0.082,0.079,0.076,0.071,0.066,0.059,0.053,0.051,0.052,0.057,0.067,0.081,0.103,
0.129,0.165,0.209,0.252,0.292,0.328,0.363,0.398,0.431,0.459,0.479,0.491,0.494,
0.488,0.475,0.457,0.43,0.397,0.357,0.316,0.285,0.254,0.227,0.206,0.189,0.181,
0.171,0.157,0.151,0.162,0.192,0.239)

One recommended way to handle an even number of points while retaining precision is to combine Simpson's 1/3 rule with Simpson's 3/8 rule; approaches of this kind can be found in engineering textbooks on numerical methods.
However, as a practical matter, you can add a check on the data length and append a single trapezoid at the end, as was suggested in the last comment of the post you linked to. I wouldn't assume that it is necessarily as precise as combining Simpson's 1/3 and 3/8 rules, but it is probably reasonable for many applications.
I would double-check my code edits below, but this is the basic idea.
AUC <- function(x, h=1){
  # AUC computes the Area Under the Curve of a time series using
  # Simpson's rule (numerical method).
  # https://link.springer.com/chapter/10.1007/978-1-4612-4974-0_26
  # Arguments
  #   x: (vector) time series values
  #   h: (numeric) temporal resolution of the time series, default h=1
  # jh edit: check for even data length
  # and chop off last data point if even
  nn = length(x)
  if(length(x) %% 2 == 0){
    xlast = x[length(x)]
    x = x[-length(x)]
  }
  n = length(x)-1
  xValues = seq(from=1, to=n, by=2)
  sum <- list()
  for(i in 1:length(xValues)){
    n_sub <- xValues[[i]]-1
    n     <- xValues[[i]]
    n_add <- xValues[[i]]+1
    v1 <- x[[n_sub+1]]
    v2 <- x[[n+1]]
    v3 <- x[[n_add+1]]
    s <- (h/3)*(v1 + 4*v2 + v3)
    sum <- append(sum, s)
  }
  sum <- unlist(sum)
  auc <- sum(sum)
  ## jh edit: add trapezoid for last two data points to result
  if(nn %% 2 == 0){
    auc <- auc + (x[length(x)] + xlast)/2 * h
  }
  return(auc)
}
sm = smoothed[-length(smoothed)]
length(sm)
[1] 70
#even data as an example
AUC(sm)
[1] 20.17633
#original odd data
AUC(smoothed)
[1] 20.389

There may be a good reason for you to prefer using Simpson's rule, but if you're just looking for a quick and efficient estimate of AUC, the trapezoid rule is far easier to implement, and does not require an even number of breaks:
AUC <- function(x, h = 1) sum((x[-1] + x[-length(x)]) / 2 * h)
AUC(smoothed)
#> [1] 20.3945

Here I show example code that uses Simpson's 1/3 and 3/8 rules in tandem for the numerical integration of data. As always, the usual caveats about possible coding errors or compatibility issues apply.
The output at the end compares the numerical estimates of this algorithm and of the trapezoidal rule against a precise estimate from R's integrate function.
#Algorithm adapted from:
#Numerical Methods for Engineers, Seventh Edition,
#By Chapra and Canale, page 623
#Modified to accept data instead of functional values
#Modified by: Jeffrey Harkness, M.S.
##Begin Simpson's rule function code
simp13 <- function(dat, h = 1){
  ans <- 2*h*(dat[1] + 4*dat[2] + dat[3])/6
  return(ans)
}
simp13m <- function(dat, h = 1){
  summ <- dat[1]
  n <- length(dat)
  nseq <- seq(2, (n-2), 2)
  for(i in nseq){
    summ <- summ + 4*dat[i] + 2*dat[i+1]
  }
  summ <- summ + 4*dat[n-1] + dat[n]
  result <- (h*summ)/3
  return(result)
}
simp38 <- function(dat, h = 1){
  ans <- 3*h*(dat[1] + 3*sum(dat[2:3]) + dat[4])/8
  return(ans)
}
simpson <- function(dat, h = 1){
  hin <- h
  len <- length(dat)
  comp <- len %% 2
  ## number of segments
  if(len == 2){
    ans <- sum(dat)/2*h  ## n = 2 is the trapezoidal rule
  }
  if(len == 3){
    ans <- simp13(dat, h = hin)
  }
  if(len == 4){
    ans <- simp38(dat, h = hin)
  }
  if(len == 6){
    ans <- simp38(dat[1:4], h = hin) + simp13(dat[4:len], h = hin)
  }
  if(len > 6 & comp == 0){
    ans <- simp38(dat[1:4], h = hin) + simp13m(dat[4:len], h = hin)
  }
  if(len >= 5 & comp == 1){
    ans <- simp13m(dat, h = hin)
  }
  return(ans)
}
##End Simpson's rule function code
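As a quick usage sketch (mine, not part of the original answer), the dispatcher can be applied directly to the question's data, assuming smoothed from the question and the even-length sm from the earlier answer are defined:
## usage sketch; assumes `smoothed` (71 points) and `sm` (70 points) from above
simpson(smoothed, h = 1)  # odd number of points: composite 1/3 rule
simpson(sm, h = 1)        # even number of points: 3/8 rule on the first four points, 1/3 rule on the rest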
This next section of code shows the performance comparison. This code can easily be altered for different test functions and cases.
The precision difference tends to change with the sample size and test function used; this example is not intended to imply that the difference is always this pronounced.
#other algorithm for comparison purposes, from Allan Cameron above
oa <- function(x, h = 1) sum((x[-1] + x[-length(x)]) / 2 * h)
#Testing and algorithm comparison code
simans = NULL; oaans = NULL; simerr = NULL; oaerr = NULL; mp = NULL
for(j in 1:10){
  n = j
  #f = function(x) cos(x) + 2 ##Test functions
  f = function(x) 0.2 + 25*x - 200*x^2 + 675*x^3 - 900*x^4 + 400*x^5
  a = 0; b = 10
  h = (b-a)/n
  datain = seq(a, b, by = h)
  preans = integrate(f, a, b)$value  # precise numerical estimate of the test function
  simans[j] = simpson(f(datain), h = h)
  oaans[j]  = oa(f(datain), h = h)
  simerr[j] = abs(simans[j] - preans)/preans * 100
  oaerr[j]  = abs(oaans[j] - preans)/preans * 100
  mp[j] = simerr[j] < oaerr[j]
}
(outframe = data.frame("simpsons percent diff" = simerr,"trapezoidal percent diff" = oaerr, "more precise?" = mp, check.names = F))
simpsons percent diff trapezoidal percent diff more precise?
1 214.73489738 214.734897 FALSE
2 15.07958148 64.993410 TRUE
3 6.70203621 29.816799 TRUE
4 0.94247384 16.955208 TRUE
5 0.54830021 10.905620 TRUE
6 0.18616767 7.593825 TRUE
7 0.12051767 5.588209 TRUE
8 0.05890462 4.282980 TRUE
9 0.04087107 3.386525 TRUE
10 0.02412733 2.744500 TRUE

Related

Improved inverse transform method for Poisson random variable generation in R

I am reading Section 4.2 of Simulation (2006, 4th ed., Elsevier) by Sheldon M. Ross, which introduces generating a Poisson random variable by the inverse transform method.
Denote p_i = P(X = i) = e^{-λ} λ^i / i!, i = 0, 1, ..., and F(i) = P(X ≤ i) = Σ_{k=0}^{i} p_k as the PMF and CDF of the Poisson distribution, respectively, which can be computed via dpois(x, lambda) and ppois(x, lambda) in R.
There are two inverse transform algorithms for Poisson: the regular version and the improved one.
The steps for the regular version are as follows:
Simulate an observation U from U(0,1).
Set i = 0 and F = F(0) = p_0 = e^{-λ}.
If U < F, select X = i and terminate.
If U ≥ F, set i = i+1, F = F + p_i, and return to the previous step.
I wrote and tested the above steps as follows:
### write the regular R code
pois_inv_trans_regular = function(n, lambda){
  X = rep(0, n)  # generate n samples
  for(m in 1:n){
    U = runif(1)
    i = 0; F = exp(-lambda)  # initialize
    while(U >= F){
      i = i+1; F = F + dpois(i, lambda)  # F = F + p_i
    }
    X[m] = i
  }
  X
}
### test the code (for small λ, e.g. λ=3)
set.seed(0); X = pois_inv_trans_regular(n=10000,lambda=3); c(mean(X),var(X))
# [1] 3.005000 3.044079
Note that the mean and variance of Poisson(λ) are both λ, so the regular code appears to be working correctly.
Next I tried the improved one, which is designed for large λ and described according to the book as follows:
The regular algorithm needs to make on average 1+λ searches, i.e. it has O(λ) computational complexity, which is fine when λ is small but can be greatly improved upon when λ is large.
Indeed, since a Poisson random variable with mean λ is most likely to take on one of the two integer values closest to λ, a more efficient algorithm would first check one of these values rather than starting at 0 and working upward. For instance, let I = Int(λ) and recursively determine F(I).
Now generate a Poisson random variable X with mean λ by generating a random number U and noting whether or not X ≤ I by checking whether or not U ≤ F(I). Then search downward starting from I in the case where X ≤ I, and upward starting from I+1 otherwise.
It is said that the improved algorithm needs only 1 + 0.798√λ searches, i.e., it has O(√λ) complexity.
I tried to write the R code for the improved version as follows:
### write the improved R code
pois_inv_trans_improved = function(n, lambda){
  X = rep(0, n)                        # generate n samples
  p = function(x) {dpois(x, lambda)}   # PMF: p(x) = P(X=x) = λ^x exp(-λ)/x!
  F = function(x) {ppois(x, lambda)}   # CDF: F(x) = P(X ≤ x)
  I = floor(lambda)                    # I = Int(λ)
  F1 = F(I); F2 = F(I+1)               # two close values
  for(k in 1:n){
    U = runif(1)
    i = I
    if ( F1 < U & U <= F2 ) {
      i = I+1
    }
    while (U <= F1){                   # search downward
      i = i-1; F1 = F1 - p(i)
    }
    while (U > F2){                    # search upward
      i = i+1; F2 = F2 + p(i)
    }
    X[k] = i
  }
  X
}
### test the code (for large λ, e.g. λ=100)
set.seed(0); X = pois_inv_trans_improved(n=10000,lambda=100); c(mean(X),var(X))
# [1] 100.99900000 0.02180118
The simulation results [1] 100.99900000 0.02180118 for c(mean(X), var(X)) are nonsensical for the variance part. How should I remedy this issue?
The main problem was that F1 and F2 were modified within the loop and never reset, so eventually a very wide range of U values was considered to be "in the middle".
The second problem was in the downward search: the p(i) that is subtracted should use the value of i before it is decremented, because F(x) = P(X ≤ x). Without this, the code hangs for small U.
The easiest fix for this is to start with i = I + 1. Then the "in the middle" if statement isn't needed.
pois_inv_trans_improved = function(n, lambda){
  X = rep(0, n)                        # generate n samples
  p = function(x) {dpois(x, lambda)}   # PMF: p(x) = P(X=x) = λ^x exp(-λ)/x!
  `F` = function(x) {ppois(x, lambda)} # CDF: F(x) = P(X ≤ x)
  I = floor(lambda)                    # I = Int(λ)
  F1 = F(I); F2 = F(I+1)               # two close values
  for(k in 1:n){
    U = runif(1)
    i = I + 1
    # if ( F1 < U & U <= F2 ) {
    #   i = I + 1
    # }
    F1tmp = F1
    while (U <= F1tmp){                # search downward
      i = i-1; F1tmp = F1tmp - p(i)
    }
    F2tmp = F2
    while (U > F2tmp){                 # search upward
      i = i+1; F2tmp = F2tmp + p(i)
    }
    X[k] = i
  }
  X
}
This gives:
[1] 100.0056 102.2380
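To sanity-check the claimed O(√λ) behaviour empirically, one can count the while-loop iterations per draw in the fixed sampler and compare the average to 1 + 0.798√λ. The count_searches helper below is my own rough sketch, not part of the thread:
## instrumentation sketch; mirrors the logic of the fixed sampler above
count_searches <- function(n, lambda){
  I <- floor(lambda); F1 <- ppois(I, lambda); F2 <- ppois(I + 1, lambda)
  steps <- numeric(n)
  for(k in 1:n){
    U <- runif(1); i <- I + 1; s <- 1        # the initial comparison counts as one search
    F1tmp <- F1
    while (U <= F1tmp){ i <- i - 1; F1tmp <- F1tmp - dpois(i, lambda); s <- s + 1 }
    F2tmp <- F2
    while (U > F2tmp){ i <- i + 1; F2tmp <- F2tmp + dpois(i, lambda); s <- s + 1 }
    steps[k] <- s
  }
  c(observed = mean(steps), predicted = 1 + 0.798*sqrt(lambda))
}
count_searches(10000, lambda = 100)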

Comparison of two vectors resulted after simulation

I would like to apply the rejection sampling method to simulate a random vector Y = (Y_1, Y_2) uniformly distributed on the unit disc D = \{ (y_1, y_2) \in \mathbb{R}^2 : \sqrt{y_1^2 + y_2^2} \le 1 \}, where X = (X_1, X_2) is a random vector uniformly distributed on the square S = [-1, 1]^2 and the target joint density is f(y_1, y_2) = \frac{1}{\pi} 1_{D}(y_1, y_2).
In the rejection method, the proposal density g must satisfy f(x) \le C g(x), and a proposed sample is accepted with probability f(x) / (C g(x)). I am using the following code:
x=runif(100,-1,1)
y=runif(100,-1,1)
d=data.frame(x=x,y=y)
disc_sample=d[(d$x^2+d$y^2)<1,]
plot(disc_sample)
I have two questions:
Using the above code, logically, the size of d should be greater than the size of disc_sample, but when I call both of them I see there are 100 elements in each one. How is this possible? Why are the sizes the same? THIS PART IS SOLVED, thanks to the comment below.
The question now:
How could I reformulate my code to give me the total number of samples needed to obtain 100 samples satisfying the condition, i.e. the number of samples rejected until I got the 100 needed samples?
Thanks to r2evans for the answer, but I am looking to write something simpler: a while loop that stores all candidate samples in a matrix or a data frame instead of a list, and then selects from that data frame just the samples that satisfy the condition. I modified the code from the answer to avoid the lists and the sapply function, but it does not give the needed result; it yields only one row.
i=0
samps <- data.frame()
goods <- data.frame()
nr <- 0L
sampsize <- 100L
needs <- 100L
while (i < needs) {
  samps <- data.frame(x = runif(1, -1, 1), y = runif(1, -1, 1))
  goods <- samps[(samps$x^2 + samps$y^2) < 1, ]
  i = i+1
}
and I also thought about this:
i=0
j=0
samps <- matrix()
goods <- matrix()
needs <- 100
while (j < needs) {
  samps[i,1] <- runif(1, -1, 1)
  samps[i,2] <- runif(1, -1, 1)
  if (((samps[i,1])**2 + (samps[i,2])**2) < 1){
    goods[j,1] <- samps[i,1]
    goods[j,2] <- samps[i,2]
  }
  else{
    i = i+1
  }
}
but it is not working.
I would be very grateful for any help to modify the code.
As to your second question ... you cannot reformulate your code to know in advance precisely how many draws it will take to get (at least) 100 qualifying combinations. You can use a while loop and concatenate results until you have at least 100 such rows, and then truncate anything over 100. Because using entropy piecewise (at scale) is "expensive", you might prefer to always over-estimate the number of rows you need and grab them all at once.
(Edited to reduce "complexity" based on homework constraints.)
set.seed(42)
samps <- vector(mode = "list")
goods <- vector(mode = "list")
nr <- 0L
iter <- 0L
sampsize <- 100L
needs <- 100L
while (nr < needs && iter < 50) {
  iter <- iter + 1L
  samps[[iter]] <- data.frame(x = runif(sampsize, -1, 1), y = runif(sampsize, -1, 1))
  rows <- (samps[[iter]]$x^2 + samps[[iter]]$y^2) < 1
  goods[[iter]] <- samps[[iter]][rows, ]
  nr <- nr + sum(rows)
}
iter # number of times we looped
# [1] 2
out <- head(do.call(rbind, goods), n = 100)
NROW(out)
# [1] 100
head(out) ; tail(out)
# x y
# 1 0.8296121 0.2524907
# 3 -0.4277209 -0.5668654
# 4 0.6608953 -0.2221099
# 5 0.2834910 0.8849114
# 6 0.0381919 0.9252160
# 7 0.4731766 0.4797106
# x y
# 221 -0.65673577 -0.2124462
# 231 0.08606199 -0.7161822
# 251 -0.37263236 0.1296444
# 271 -0.38589120 -0.2831997
# 28 -0.62909284 0.6840144
# 301 -0.50865171 0.5014720
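For completeness, here is one way the second attempt from the question could be repaired into the simpler while loop the asker describes, while also tracking the total number of draws. This is my sketch under those constraints, not part of the accepted answer:
set.seed(42)
needs <- 100
goods <- matrix(NA_real_, nrow = needs, ncol = 2)  # accepted points go here
total <- 0L  # total number of candidate draws (accepted + rejected)
j <- 0L      # number accepted so far
while (j < needs) {
  total <- total + 1L
  cand <- runif(2, -1, 1)      # one candidate point in the square [-1, 1]^2
  if (sum(cand^2) < 1) {       # accept if it falls inside the unit disc
    j <- j + 1L
    goods[j, ] <- cand
  }
}
total          # draws needed to obtain 100 accepted samples
total - needs  # number of rejected draws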

Keep getting 'unused argument' error when trying to produce a qqplot in R

library(GoFKernel)
library(ggplot2)
rejection_fx_sqz <- function(n){
  x <- vector() # the output vector in which simulated values should be stored
  acpt <- 0     # count the accepted values
  tol <- 0      # count the total number of values (accepted or not accepted)
  len_x = 0
  while(len_x < n){
    n_to_gen = max((n-len_x)/0.69, 20) # determine number to generate - not less than 20
    tol = tol + n_to_gen               # count the total number of values simulated
    u1 = runif(n_to_gen)               # simulate u1
    u2 = runif(n_to_gen)               # simulate u2
    y = inv_G(u2)
    g <- g_x(y)
    d <- g*y*(5-y)
    condU <- (M*u1) >= 1/d
    condL <- !condU
    condL[condL] <- (M*u1[condL]) <= lower(y[condL])/d[condL]
    other <- !(condU | condL)          # condition of below W_U and above W_L
    # modify condL because some samples can still be accepted given condL is false
    condL[other] <- u1[other] <= fstar(y[other])/(M*g[other])
    cond <- condL
    acpt = acpt + sum(cond)            # count the number of accepted values
    x <- c(x, y[cond])                 # add accepted values to the output vector
    len_x <- length(x)
  }
  p = acpt / tol
  return(list(x = x[1:n], p = p))
}
n = 100000
x = rejection_fx_sqz(n) # a function that simulates from f(x) by generating n samples
x_fx <- data.frame(x = x$x)
x = x_fx$x
x_plot = cbind(x_fx, fy = 1/(I*x*(5-x))*exp(-1/8*(-1+log(x/(5-x)))^2))
f_cdf <- function(x) {
  integrate(fstar, 0, x)$value/I
}
# quantile function, inverse cdf
f_q <- inverse(f_cdf, lower = 0.000000000000000001, upper = 4.999999999999999999)
ggplot(x_plot, aes(sample = x)) +
  labs(title = "Empirical against theoretical quantiles") +
  stat_qq(distribution = f_q) +
  stat_qq_line(distribution = f_q)
What I'm trying to do is produce a quantile-quantile diagnostic plot for my algorithm for simulating from f(x). The problem is that I keep getting two error messages:
Computation failed in stat_qq(): unused argument (p = quantiles)
Computation failed in stat_qq_line(): unused argument (p = quantiles)
I am a beginner with the R language and this is driving me crazy. Any help is appreciated.
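One plausible cause, offered as a guess rather than a confirmed diagnosis: stat_qq() and stat_qq_line() call the supplied distribution function with a named argument p, whereas the function returned by GoFKernel::inverse() takes a single argument with a different name, which would produce exactly "unused argument (p = quantiles)". A thin wrapper whose argument is literally named p (and which is vectorised over p) may resolve it; f_q_vec below is an illustrative name of mine:
## sketch of a possible fix; assumes f_q from the code above is defined
f_q_vec <- function(p) sapply(p, f_q)   # argument named `p`, applied element-wise
ggplot(x_plot, aes(sample = x)) +
  labs(title = "Empirical against theoretical quantiles") +
  stat_qq(distribution = f_q_vec) +
  stat_qq_line(distribution = f_q_vec)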

Markowitz model / portfolio optimization using local search in R

I am taking baby steps in using metaheuristics to solve constrained optimization problems. I am trying to solve the basic Markowitz mean-variance optimization model (given below) using the NMOF package in R.
\min_{w} \quad \lambda \sum_{i=1}^{N}\sum_{j=1}^{N} w_i w_j \sigma_{ij} \;-\; (1-\lambda)\sum_{i=1}^{N} w_i \mu_i
subject to
\sum_{i=1}^{N} w_i = 1, \qquad 0 \le w_i \le 1, \; i = 1, \dots, N
where \lambda takes values between 0 and 1 and N is the number of assets.
Following is my code (based on the book Numerical Methods and Optimization in Finance):
library(NMOF)
na <- dim(fundData)[2L]
ns <- dim(fundData)[1L]
Sigma <- cov(fundData)
winf <- 0.0
wsup <- 1.0
m <- colMeans(fundData)
resample <- function(x, ...) x[sample.int(length(x), ...)]
data <- list(R = t(fundData),
             m = m,
             na = dim(fundData)[2L],
             ns = dim(fundData)[1L],
             Sigma = Sigma,
             eps = 0.5/100,
             winf = winf,
             wsup = wsup,
             nFP = 100)
w0 <- runif(data$na); w0 <- w0/sum(w0)
OF <- function(w, data){
  wmu <- crossprod(w, m)
  res <- crossprod(w, data$Sigma)
  res <- tcrossprod(w, res)
  result <- res - wmu
}
neighbour <- function(w, data){
  toSell <- w > data$winf
  toBuy <- w < data$wsup
  i <- resample(which(toSell), size = 1L)
  j <- resample(which(toBuy), size = 1L)
  eps <- runif(1) * data$eps
  eps <- min(w[i] - data$winf, data$wsup - w[j], eps)
  w[i] <- w[i] - eps
  w[j] <- w[j] + eps
  w
}
algo <- list(x0 = w0, neighbour = neighbour, nS = 5000L)
system.time(sol1 <- LSopt(OF, algo, data))
I am not sure how to include lambda in the objective function (OF); the code above does not include it. I tried using a for loop, but it resulted in the following error:
OF <- function(w, data){
  lambdaSeq <- seq(.001, 0.999, length = data$nFP)
  for(lambda in lambdaSeq){
    wmu <- crossprod(w, m)
    res <- crossprod(w, data$Sigma)
    res <- tcrossprod(w, res)
    result <- lambda*res - (1-lambda)*wmu
  }
}
Error:
Local Search.
Initial solution:
| | 0%
Error in if (xnF <= xcF) { : argument is of length zero
Timing stopped at: 0.01 0 0.03
It would be nice if someone could help me in this regard.
P.S.: I am also aware that this can be solved using quadratic programming. This is just a starting point before adding other constraints.
If I understand correctly, you want to replicate the mean-variance efficient frontier by Local Search? Then you need to run a Local Search for every value of lambda that you want to include in the frontier.
The following example should help you get going. I start by attaching the package and setting up the list data.
require("NMOF")
data <- list(m = colMeans(fundData),   ## expected returns
             Sigma = cov(fundData),    ## expected var of returns
             na = dim(fundData)[2L],   ## number of assets
             eps = 0.2/100,            ## stepsize for LS
             winf = 0,                 ## minimum weight
             wsup = 1,                 ## maximum weight
             lambda = 1)
Next I compute a benchmark for the minimum-variance case (i.e. lambda equals one).
## benchmark: the QP solution
## ==> this will only work with a recent version of NMOF,
## which you can get by saying:
## install.packages('NMOF', type = 'source',
## repos = c('http://enricoschumann.net/R',
## getOption('repos')))
##
require("quadprog")
sol <- NMOF:::minvar(data$Sigma, 0, 1)
Objective function and neighbourhood function. I have slightly simplified both functions (for clarity; using crossprod in the objective function would probably be more efficient).
OF <- function(w, data){
  data$lambda * (w %*% data$Sigma %*% w) -
    (1 - data$lambda) * sum(w * data$m)
}
neighbour <- function(w, data){
  toSell <- which(w > data$winf)
  toBuy <- which(w < data$wsup)
  i <- toSell[sample.int(length(toSell), size = 1L)]
  j <- toBuy[sample.int(length(toBuy), size = 1L)]
  eps <- runif(1) * data$eps
  eps <- min(w[i] - data$winf, data$wsup - w[j], eps)
  w[i] <- w[i] - eps
  w[j] <- w[j] + eps
  w
}
Now we can run Local Search. Since it is a fairly large dataset (200 assets),
you will need a relatively large number of steps to reproduce the QP solution.
w0 <- runif(data$na) ## a random initial solution
w0 <- w0/sum(w0)
algo <- list(x0 = w0, neighbour = neighbour, nS = 50000L)
sol1 <- LSopt(OF, algo, data)
You can compare the weights you get from Local Search with the QP solution.
par(mfrow = c(3, 1), mar = c(2, 4, 1, 1), las = 1)
barplot(sol, main = "QP solution")
barplot(sol1$xbest, main = "LS solution")
barplot(sol - sol1$xbest,
        ylim = c(-0.001, 0.001))  ## +/- 0.1%
Finally, if you want to compute the whole frontier, you need to rerun this code for different levels of data$lambda.
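A minimal sketch of that rerun, assuming the OF, neighbour, data and w0 objects defined above (the grid of lambda values is purely illustrative):
lambdas <- seq(0.1, 1, by = 0.1)             ## illustrative grid of lambda values
frontier <- vector("list", length(lambdas))  ## one weight vector per lambda
for (k in seq_along(lambdas)) {
  data$lambda <- lambdas[k]
  algo <- list(x0 = w0, neighbour = neighbour, nS = 50000L)
  frontier[[k]] <- LSopt(OF, algo, data)$xbest
}
## frontier[[k]] holds the LS weights for lambdas[k]; the corresponding return and
## variance follow from sum(w * data$m) and w %*% data$Sigma %*% w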

fitting a 5th order Bézier Curve to a data set

This is a slightly specific problem, so a bit of knowledge of R and of Bézier curves is required to be of help (thanks if you have it!).
I need some help with my R code: I have a series of discretely sampled observations and I am trying to fit a 5th-order Bézier curve through these points with simple least-squares regression. I have some constraints on the positions of the 6 control points:
A & B have the same Y-axis coordinate
B & C have the same X-axis coordinate
C & D have the same Y-axis coordinate
D & E have the same X-axis coordinate
E & F have the same Y-axis coordinate
A is located on the observation two turning points before the last observation
The X-axis coordinate of the last observation is somewhere between the X-axis coordinates of E and F
Like this image:
Say I have these data:
-0.01105
-0.01118
-0.01271
-0.01479
-0.01729
-0.01996
-0.02250
-0.02473
-0.02554
-0.02478
-0.02207
-0.01788
-0.01319
-0.00956
They have a "curvy" shape so a Bézier curve would fit: the result of my code is this image: the data are in red, the 5th order Bézier and its control points with their restrictions in blue:
Like this image:
So you see that I have some kind of solution, but this is the problem:
The X-axis location of the right-most control point is always to the right of the last input data point, and to get an appropriate fit I had to hard-code the value of t (t runs from 0 to 1 along a Bézier curve) at which the input data end (the "limit" variable in my code). How do I rewrite it so that I no longer have to do that, while the horizontal spread of the t-values remains constant, also outside of the input data?
(given the restrictions on the control points, and maximizing the fit of the part of the curve that overlaps with the input data)
If you can help, please take a look at this R code; any help is much, much appreciated, and happy holidays!
ps: what I call exampledata.csv in my code is just the data above.
getT <- function(x){
  # Calculates length from origin of each point in the path.
  # Args:
  #   x : a one-dimensional vector
  # Returns:
  #   out : a vector of distances from the origin, as a percent of the end point - start point distance
  out <- cumsum(abs(diff(x)))
  out <- c(0, out/out[length(out)])
  return(out)
}
cost_f <- function(X, Y, K){
  pred <- K %*% X
  c <- Y - pred
  out <- list(loss = as.vector(t(c) %*% c), pred = pred)
  return(out)
}
df <- read.csv('exampledata.csv')
T <- nrow(df)
df['d'] = 1:T
# # identify all turning points:
# turn_point <- c(1)
# for(i in 2:(T-1)){
# if( ( (df[i,'x'] < df[i-1,'x']) & (df[i,'x'] < df[i+1,'x'])) | ( (df[i,'x'] > df[i-1,'x']) & (df[i,'x'] > df[i+1,'x'])) ){
# turn_point <- c(turn_point, i)
# }
# }
fit_last_piece <- function(df){
  limit <- .79
  turn_point <- c(1)
  for(i in 2:(T-1)){
    if( ((df[i,'x'] < df[i-1,'x']) & (df[i,'x'] < df[i+1,'x'])) | ((df[i,'x'] > df[i-1,'x']) & (df[i,'x'] > df[i+1,'x'])) ){
      turn_point <- c(turn_point, i)
    }
  }
  nk <- length(turn_point)  # number of turning points
  data <- df[turn_point[nk-1]:nrow(df),]
  end_x <- data$d[1]
  end_y <- data$x[1]
  constr_x <- matrix(c(1,0,0,0,0,0,  # remember: matrix() fills column by column
                       0,1,1,0,0,0,
                       0,0,0,1,1,0,
                       0,0,0,0,0,1), nrow = 6, ncol = 4)
  constr_y <- matrix(c(1,1,0,0,0,0,
                       0,0,1,1,0,0,
                       0,0,0,0,1,1), nrow = 6, ncol = 3)
  M <- matrix(c( -1,  5, -10,  10, -5, 1,
                  5, -20, 30, -20,  5, 0,
                -10,  30, -30, 10,  0, 0,
                 10, -20, 10,   0,  0, 0,
                 -5,   5,  0,   0,  0, 0,
                  1,   0,  0,   0,  0, 0), nrow = 6, ncol = 6)
  t_x <- getT(data$d)*limit
  T_x <- cbind(t_x^5, t_x^4, t_x^3, t_x^2, t_x, rep(1, length(t_x)))
  in_par <- (tail(data$d,1) - data$d[1])*c(2/5, 4/5, 6/5) + data$d[1]  # initial values for the intermediate x control points
  res_x <- optim(par = in_par, fn = function(par){cost_f(c(data$d[1], par[1], par[2], par[3]), data$d, T_x%*%M%*%constr_x)$loss})
  #res_x <- optimize(f = function(par){cost_f(c(df$d[1],par,df$d[nrow(df)]), df$d, T_x%*%M%*%constr_x)$loss}, interval = c(df$d[1],df$d[nrow(df)]), tol = .Machine$double.eps^0.25)
  optim_x <- c(data$d[1], res_x$par)
  pred_x <- cost_f(optim_x, data$d, T_x%*%M%*%constr_x)$pred
  t_y <- getT(data$x)*limit
  T_y <- cbind(t_y^5, t_y^4, t_y^3, t_y^2, t_y, rep(1, length(t_y)))
  in_par <- c()
  res_y <- optim(par = c(data$x[floor(nrow(data)/2)], tail(data$x,1)), fn = function(par){cost_f(c(data$x[1], par[1], par[2]), data$x, T_y%*%M%*%constr_y)$loss})
  optim_y <- c(data$x[1], res_y$par[1], res_y$par[2])
  #pred_y <- cost_f(res_y$par, df$x, T_y%*%M%*%constr_y)$pred
  pred_y <- cost_f(optim_y, data$x, T_y%*%M%*%constr_y)$pred
  t_x_p <- c(t_x, seq(tail(t_x,1), 1, length.out = 10))
  T_x_p <- cbind(t_x_p^5, t_x_p^4, t_x_p^3, t_x_p^2, t_x_p, rep(1, length(t_x_p)))
  t_y_p <- c(t_y, seq(tail(t_y,1), 1, length.out = 10))
  T_y_p <- cbind(t_y_p^5, t_y_p^4, t_y_p^3, t_y_p^2, t_y_p, rep(1, length(t_y_p)))
  pred_x <- T_x_p%*%M%*%constr_x%*%optim_x
  pred_y <- T_y_p%*%M%*%constr_y%*%optim_y
  # this part is new:
  plot(pred_x, pred_y, ylim = c(min(c(data$x, pred_y, res_y$par)), max(c(data$x, pred_y, res_y$par))), col = "blue", type = "b")
  points(data$d, data$x, col = 'red', type = "b")
  points(pred_x[1], pred_y[1], pch = 20, col = 'blue')
  points(res_x$par[1], pred_y[1], pch = 20, col = 'blue')
  points(res_x$par[1], res_y$par[1], pch = 20, col = 'blue')
  points(res_x$par[2], res_y$par[1], pch = 20, col = 'blue')
  points(res_x$par[2], res_y$par[2], pch = 20, col = 'blue')
  points(res_x$par[3], res_y$par[2], pch = 20, col = 'blue')
  segments(pred_x[1], pred_y[1], res_x$par[1], pred_y[1], lty = 3, col = 'blue')
  segments(res_x$par[1], pred_y[1], res_x$par[1], res_y$par[1], lty = 3, col = 'blue')
  segments(res_x$par[1], res_y$par[1], res_x$par[2], res_y$par[1], lty = 3, col = 'blue')
  segments(res_x$par[2], res_y$par[1], res_x$par[2], res_y$par[2], lty = 3, col = 'blue')
  segments(res_x$par[2], res_y$par[2], res_x$par[3], res_y$par[2], lty = 3, col = 'blue')
}
fit_last_piece(df)
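For readers puzzled by the T %*% M %*% constr products above: M is the power-basis coefficient matrix of a degree-5 Bézier curve, so each coordinate of the curve is a polynomial in t whose coefficients are linear in the control points. The same curve can be evaluated directly with Bernstein polynomials; bezier5 below is my own illustrative helper, not part of the question's code:
## evaluate one coordinate of a degree-5 Bézier curve via Bernstein polynomials
## (ctrl holds the six control-point values for that axis)
bezier5 <- function(t, ctrl){
  stopifnot(length(ctrl) == 6)
  sapply(t, function(tt) sum(choose(5, 0:5) * tt^(0:5) * (1 - tt)^(5 - (0:5)) * ctrl))
}
## e.g. bezier5(seq(0, 1, by = 0.1), ctrl) traces that coordinate for t in [0, 1]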
