Conversion to Pine Script v4 - global variables

I am trying to convert the following function into Pine Script v4.0:
ssFilter(price, lowerBand) =>
    angle = sqrt(2) * PI / lowerBand
    a1 = exp(-angle)
    b1 = 2 * a1 * cos(angle)
    c2 = b1
    c3 = -a1 * a1
    c1 = 1 - c2 - c3
    filt := c1 * (price + nz(price[1])) / 2 + c2 * nz(filt[1]) + c3 * nz(filt[2])
But it returns an error:
Cannot modify global variable 'filt' in function

Your function should look like this:
ssFilter(price, lowerBand) =>
    angle = sqrt(2) * PI / lowerBand
    a1 = exp(-angle)
    b1 = 2 * a1 * cos(angle)
    c2 = b1
    c3 = -a1 * a1
    c1 = 1 - c2 - c3
    c1 * (price + nz(price[1])) / 2 + c2 * nz(filt[1]) + c3 * nz(filt[2])
and then in your main code, you should use your function to assign the result to filt, like this:
filt := ssFilter(price, lowerBand)

Related

Every time variable increases, create a new sample using a for loop

I need to do the following: I would like to increase the variable pi1 from -1 to 0 in steps of 0.0001 while all the other variables stay the same. As a second step I need to draw 1000 samples for each different pi1 value. In the end, I need to measure the bias of both regressions relative to the true value. After a thorough investigation, I really don't see why both loops are not working.
This is the sort of idea of how it could work:
index <- seq(1, 1000)
beta_OLS <- NULL
beta_IV <- NULL
for (i in seq(from = -1, to = 0, by = 0.001)) {
  for (k in index) {
    n <- 2000
    pi1 <- i
    b0 <- 0
    b1 <- 0
    b2 <- -1/1000
    b3 <- 1/5
    z <- runif(n, 0, 25)
    ov <- rnorm(n, 0, 1)
    d <- -1/2 + pi1 * z + 1/2 * ov + rnorm(n, 0, 1) > 0
    y <- b0 + b1 * d + b2 * z + b3 * ov + rnorm(n, 0, 1/10)
    # OLS Regression
    model12 = lm(y ~ d, data = data)
    beta_OLS[k] = model12$coefficients[2]
    # IV Regression
    model12_1 = ivreg(y ~ d | z, data = data)
    beta_IV[k] = model12_1$coefficients[2]
  }
}
real_value <- - 1/1000
average_OLS <- mean(beta_OLS)
average_IV <- mean(beta_IV)
biased_OLS <- average_OLS - real_value
biased_IV <- average_IV - real_value
biased_OLS
biased_IV
I made a few alterations here and it ran. I am not sure if the results are right.
Since both loops had the same count, I eliminated one of them.
I also assigned the d and y results from every run to a data frame called data (which you used as the source for your regressions).
index = seq(1, 1000)
beta_OLS = NULL
beta_IV = NULL
i = -1
for (k in index) {
  n <- 2000
  pi1 <- i
  b0 <- 0
  b1 <- 0
  b2 <- -1/1000
  b3 <- 1/5
  z <- runif(n, 0, 25)
  ov <- rnorm(n, 0, 1)
  d <- -1/2 + pi1 * z + 1/2 * ov + rnorm(n, 0, 1) > 0
  y <- b0 + b1 * d + b2 * z + b3 * ov + rnorm(n, 0, 1/10)
  data = as.data.frame(cbind(y, d))
  # OLS Regression
  model12 = lm(y ~ d, data = data)
  beta_OLS[k] = model12$coefficients[2]
  # IV Regression
  model12_1 = ivreg::ivreg(y ~ d | z, data = data)
  beta_IV[k] = model12_1$coefficients[2]
  i <- i + 0.001  # step pi1 for the next iteration (pi1 is set from i at the top of the loop)
}
real_value = -1/1000
average_OLS = mean(beta_OLS)
average_IV = mean(beta_IV)
biased_OLS = average_OLS - real_value
biased_IV = average_IV - real_value
biased_OLS
biased_IV
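If the original intent was to sweep the whole grid of pi1 values and draw 1000 samples at each one, keeping both loops but storing one bias per grid point would look roughly like the sketch below. It reuses the ivreg package from above and the question's own data-generating process; pi1_grid, n_samples, bias_OLS and bias_IV are names introduced here for illustration, and the full sweep is slow:
library(ivreg)

pi1_grid   <- seq(-1, 0, by = 0.001)   # grid of pi1 values to sweep
n_samples  <- 1000                     # samples per grid point
real_value <- -1/1000
bias_OLS <- numeric(length(pi1_grid))
bias_IV  <- numeric(length(pi1_grid))

for (j in seq_along(pi1_grid)) {
  beta_OLS <- numeric(n_samples)
  beta_IV  <- numeric(n_samples)
  for (k in seq_len(n_samples)) {
    n  <- 2000
    z  <- runif(n, 0, 25)
    ov <- rnorm(n, 0, 1)
    d  <- -1/2 + pi1_grid[j] * z + 1/2 * ov + rnorm(n, 0, 1) > 0
    y  <- 0 + 0 * d + (-1/1000) * z + (1/5) * ov + rnorm(n, 0, 1/10)
    data <- data.frame(y = y, d = d, z = z)
    beta_OLS[k] <- coef(lm(y ~ d, data = data))[2]
    beta_IV[k]  <- coef(ivreg(y ~ d | z, data = data))[2]
  }
  bias_OLS[j] <- mean(beta_OLS) - real_value
  bias_IV[j]  <- mean(beta_IV)  - real_value
}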

Equivalent of handle function in R

I'm moving some code from Matlab to R and I'm facing some difficulties with a particular case of function handles. This is my Matlab code:
function Application_ChFun
clear;close all;clc;warning('off');
StepsYr = 10;
%% --parameters-- %%
S0 = 1;
r = 0.0;
t0 = 0;
T2 = 5;
gamma = 0.5;
kappa = 0.3;
rho = -0.6;
vBar = 0.05;
v0 = 0.04;
NoOfPaths = 5e4;
NoOfSteps = StepsYr*T2;
%% --Define model-- %%
cf = @(u,T)ChFun(u, T, kappa,vBar,gamma,rho, v0, r);
Vc = @(t,x)MktFun(cf,t,x,log(S0));
% Define bump size
bump_T = 1e-4;
bump_K = @(T)1e-4;
% Define derivatives
dC_dT = @(T,K) (Vc(T + bump_T,K) - Vc(T ,K)) / bump_T;
dC_dK = @(T,K) (Vc(T,K + bump_K(T)) - Vc(T,K - bump_K(T))) / (2 * bump_K(T));
d2C_dK2 = @(T,K) (Vc(T,K + bump_K(T)) + Vc(T,K-bump_K(T)) - 2*Vc(T,K)) / bump_K(T)^2;
t = t0;
S = S0+zeros(NoOfPaths,1);
for i = 1:NoOfSteps
if i==1
t_adj = 1/NoOfSteps;
t = t_adj;
end
% AAA perfectly matches the R equivalent, but AAB and AAC do not.
AAA = dC_dT(t,S);
AAB = dC_dK(t,S);
AAC = d2C_dK2(t,S);
end
function value = MktFun(cf,T,x,x0)
value = CM_Proxy(cf,T,x,x0);
function value = CM_Proxy(ChF,T,K,x0)
K(K<1e-5)=1e-5;
alpha = 0.75;
c = 3e2;
N_CM = 2^12;
eta = c/N_CM;
b = pi/eta;
u = [0:N_CM-1]*eta;
lambda = 2*pi/(N_CM*eta);
i = complex(0,1);
u_new = u-(alpha+1)*i;
cf = exp(i*u_new*x0).*ChF(u_new,T);
psi = cf./(alpha^2+alpha-u.^2+i*(2*alpha+1)*u);
SimpsonW = 3+(-1).^[1:N_CM]-[1,zeros(1,N_CM-1)];
SimpsonW(N_CM) = 0;
SimpsonW(N_CM-1) = 1;
FFTFun = exp(i*b*u).*psi.*SimpsonW;
payoff = real(eta*fft(FFTFun)/3);
strike = exp(-b:lambda:b-lambda);
payoff_specific = spline(strike,payoff,K);
value = exp(-log(K)*alpha).*payoff_specific/pi;
function cf=ChFun(u, tau, kappa,vBar,gamma,rho, v0, r)
i = complex(0,1);
D_1 = sqrt(((kappa -i*rho*gamma.*u).^2+(u.^2+i*u)*gamma^2));
g = (kappa- i*rho*gamma*u-D_1)./(kappa-i*rho*gamma*u+D_1);
C = (1/gamma^2)*(1-exp(-D_1*tau))./(1-g.*exp(-D_1*tau)).*(kappa-gamma*rho*i*u-D_1);
A = i*u*r*tau + kappa*vBar*tau/gamma^2 * (kappa-gamma*rho*i*u-D_1)-2*kappa*vBar/gamma^2*log((1-g.*exp(-D_1*tau))./(1-g));
cf = exp(A + C * v0);
where MktFun is a standard function. When g = dC_dK(t,S) is called, bump_K(T) is evaluated first, and only then Vc(T, K + bump_K(T)) and Vc(T, K - bump_K(T)).
In R I have the following:
Application_ChFun <- function(){
StepsYr = 10
## --parameters-- ##
S0 = 1
r = 0.0
t0 = 0
T2 = 5
gamma = 0.5
kappa = 0.3
rho = -0.6
vBar = 0.05
v0 = 0.04
NoOfPaths = 5e4
NoOfSteps = StepsYr*T2
## --Define model-- ##
cf <- function(u,T) ChFun(u,T,kappa,vBar,gamma,rho, v0, r)
Vc <- function(t,x) MktFun(cf,t,x,log(S0))
# Define bump size
bump_T = 1e-4
bump_K <- function(T) 1e-4
# Define derivatives
dC_dT <- function(T,K) (Vc(T + bump_T,K) - Vc(T ,K)) / bump_T
dC_dK <- function(T,K) (Vc(T,K + bump_K(T)) - Vc(T,K - bump_K(T))) / (2 * bump_K(T))
d2C_dK2 <- function(T,K) (Vc(T,K + bump_K(T)) + Vc(T,K - bump_K(T)) - 2*Vc(T,K)) / bump_K(T)^2
t = t0
S = S0+rep(0,NoOfPaths)
for (i in 1:NoOfSteps){
t_real = t
if (i==1){
t_adj = 1/NoOfSteps;
t = t_adj
}
# AAA perfectly matches the Matlab equivalent, but AAB and AAC do not.
AAA = dC_dT(t,S)
AAB = dC_dK(t,S)
AAC = d2C_dK2(t,S)
}
}
MktFun <- function(cf,T,x,x0){
return(CM_Proxy(cf,T,x,x0))
}
CM_Proxy <- function(ChF,T,K,x0){
K[K<1e-5] = 1e-5
alpha = 0.75
c = 3e2
N_CM = 2^12
eta = c/N_CM
b = pi/eta
u = (0:(N_CM-1))*eta
lambda = 2*pi/(N_CM*eta)
i = complex(real = 0, imaginary = 1)
u_new = u - (alpha+1)*i # European call option.
cf = exp(i*u_new*x0)*ChF(u_new,T)
psi = cf/(alpha^2+alpha-u^2+i*(2*alpha+1)*u)
SimpsonW = 3+(-1)^(1:N_CM)-c(1,rep(0,N_CM-1))
SimpsonW[N_CM] = 0
SimpsonW[N_CM-1] = 1
FFTFun = exp(i*b*u)*psi*SimpsonW
payoff = Re(eta*fft(FFTFun)/3)
strike = exp(seq(-b,b-lambda,lambda))
K = as.vector(K)
payoff_specific = stinepack::stinterp(strike,payoff,K)
value = exp(-log(K)*alpha)*payoff_specific$y/pi
return(value)
}
ChFun <- function(u, tau, kappa,vBar,gamma,rho, v0, r){
i = complex(real = 0, imaginary = 1)
D_1 = sqrt(((kappa - i*rho*gamma*u)^2 + (u^2+i*u)*gamma^2))
g = (kappa - i*rho*gamma*u - D_1) / (kappa - i*rho*gamma*u + D_1)
C = (1/gamma^2)*(1-exp(-D_1*tau))/(1-g*exp(-D_1*tau))*(kappa-gamma*rho*i*u-D_1)
A = i*u*r*tau + kappa*vBar*tau/gamma^2 * (kappa-gamma*rho*i*u-D_1) +
-2*kappa*vBar/gamma^2*log((1-g*exp(-D_1*tau))/(1-g))
cf = exp(A + C * v0)
return(cf)
}
The problem is that in this case g = dC_dK(t,S) calls Vc directly instead of calling bump_K(T) first. Could someone please suggest a solution?
The evaluation order of functions is not necessarily inside-out (as you appear to expect) so much as the order in which values are needed. R evaluates lazily, so if you include an expensive operation whose result is never actually referenced, it is never computed.
Take this example:
f1 <- function(a) { message("f1"); a + 1; }
f2 <- function(b) { message("f2"); f1(b) + 2; }
f3 <- function(d) { message("f3"); f2(f1(d) + 3) / f2(f1(d) + 4); }
f3(2)
# f3
# f2
# f1
# f1
# f2
# f1
# f1
# [1] 0.9
When f3 is called, the calls to f2 are the next to be evaluated. When f2 is first called (with f1(d) + 3), it receives the argument unevaluated. Only when f2 tries to use its b is the argument evaluated and f1 called.
If I look at the call stack on the first call to f1, then we see:
Browse[2]> where
where 1 at #1: f1(b)
where 2 at #1: f2(f1(d) + 3)
where 3 at #1: f3(2)
showing that f3 is called first, then f2, and only then f1.
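If the Matlab-like order matters (argument evaluated before the called function's body runs), force() evaluates a promise as soon as the function is entered. A minimal sketch reusing the toy functions above (f2_eager and f3_eager are hypothetical names introduced here):
f2_eager <- function(b) { force(b); message("f2"); f1(b) + 2; }
f3_eager <- function(d) { message("f3"); f2_eager(f1(d) + 3) / f2_eager(f1(d) + 4); }
f3_eager(2)
# f3
# f1
# f2
# f1
# f1
# f2
# f1
# [1] 0.9
Each argument is now evaluated (and its f1 announced) before the body of f2_eager runs, mirroring the eager, inside-out order Matlab uses.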

Simplifying R code when fitting data (for loops)

I have working code to accomplish my goal, but as you will see it is not very elegant. I've tried writing it with for loops but my coding knowledge is relatively basic. Could some generous person help me simplify my code and hopefully annotate it so I can learn how to do it myself? I know this is a big ask but I appreciate any help, thanks!
The end goal is to produce a table, to export, that contains A0 and kobs values for each fit (along with their respective Std. errors).
The kobs values are then passed to another equation to obtain three additional parameters. In the provided data the [100] data set was excluded from the final analysis (the exclusion is justified by external factors). I am new here so if I am formatting something wrong or violating some standards please let me know and I will fix it.
Data (copied from a .csv file):
Time,[0]1,[0]2,[0]3,[1],[2.5],[6],[16],[40],[100]
0,1.008,,0.963,1.038,0.979,0.973,0.973,0.906,0.979
0,0.992,1.000,1.037,0.962,1.021,1.027,1.027,1.094,1.021
5,0.813,0.968,0.961,0.704,0.667,0.470,,,
5,0.861,0.971,0.913,0.713,0.645,0.512,0.353,0.306,0.351
10,0.820,0.868,0.888,0.613,0.407,0.262,,0.158,0.236
10,0.851,0.857,0.890,0.563,0.444,0.250,0.197,0.169,0.275
15,0.778,0.773,0.798,,,,0.154,0.145,0.204
15,0.778,0.752,0.894,0.552,0.308,0.184,0.109,0.146,0.238
20,0.610,0.727,0.806,0.441,0.247,0.180,0.114,0.143,0.269
20,0.747,0.784,0.806,0.426,0.257,0.176,0.138,0.116,0.345
30,,,,0.321,0.164,0.091,0.127,0.100,0.279
30,0.563,0.642,0.633,0.268,0.146,0.082,0.096,0.096,0.275
40,0.556,0.524,0.529,0.286,0.106,0.082,0.037,0.079,0.249
40,0.581,0.485,0.487,0.266,0.119,0.045,,,0.041
## Assign Time (x) data
t <- D1$Time
## Assign Response (y) data
R1 <- D1$`[0]1`
R2 <- D1$`[0]2`
R3 <- D1$`[0]3`
R4 <- D1$`[1]`
R5 <- D1$`[2.5]`
R6 <- D1$`[6]`
R7 <- D1$`[16]`
R8 <- D1$`[40]`
R9 <- D1$`[100]`
## Fit data
F1 <- nls(R1 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P1 <- summary(F1)$parameters[,1:2]
F2 <- nls(R2 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P2 <- summary(F2)$parameters[,1:2]
F3 <- nls(R3 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P3 <- summary(F3)$parameters[,1:2]
F4 <- nls(R4 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P4 <- summary(F4)$parameters[,1:2]
F5 <- nls(R5 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P5 <- summary(F5)$parameters[,1:2]
F6 <- nls(R6 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P6 <- summary(F6)$parameters[,1:2]
F7 <- nls(R7 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P7 <- summary(F7)$parameters[,1:2]
F8 <- nls(R8 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P8 <- summary(F8)$parameters[,1:2]
F9 <- nls(R9 ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
P9 <- summary(F9)$parameters[,1:2]
## Assemble Table
SS <- c(colnames(D1)[2],colnames(D1)[3],colnames(D1)[4],colnames(D1)[5],colnames(D1)[6],colnames(D1)[7],colnames(D1)[8],colnames(D1)[9],colnames(D1)[10])
A0 <- c(P1[1,1],P2[1,1],P3[1,1],P4[1,1],P5[1,1],P6[1,1],P7[1,1],P8[1,1],P9[1,1])
SEA0 <- c(P1[1,2],P2[1,2],P3[1,2],P4[1,2],P5[1,2],P6[1,2],P7[1,2],P8[1,2],P9[1,2])
kobs <- c(P1[2,1],P2[2,1],P3[2,1],P4[2,1],P5[2,1],P6[2,1],P7[2,1],P8[2,1],P9[2,1])
SEkobs <- c(P1[2,2],P2[2,2],P3[2,2],P4[2,2],P5[2,2],P6[2,2],P7[2,2],P8[2,2],P9[2,2])
ExTab <- cbind(SS, A0, SEA0, kobs, SEkobs)
write_clip(ExTab)
conI <- c(0,0,0,0.5,1.5,4,12,35)
kobsA <- c(P1[2,1],P2[2,1],P3[2,1],P4[2,1],P5[2,1],P6[2,1],P7[2,1],P8[2,1])
kFit <- nls(kobsA ~ k0 + ((kin*conI)/(KI+conI)), start = list(k0 = 0.1, kin = 0.2, KI = 3))
summary(kFit)
My inclination is to use for loops to reduce the repetition, but I am unable to write working ones. I would also like to eliminate the ## Assign Response (y) data section so that I can apply the code to generic data sets with different concentrations in the column names, but using D1[2] in place of R1 inside the nls function produces an error.
You can start like this:
t <- D1$Time
cols <- 2:ncol(D1)                      # skip the Time column
SS <- colnames(D1)[cols]
A0 <- SEA0 <- kobs <- SEkobs <- numeric(length(cols))
for (j in seq_along(cols)) {
  fit <- nls(D1[[cols[j]]] ~ A1 * exp(-k1 * t), start = list(A1 = 1, k1 = 0.02))
  P <- summary(fit)$parameters[, 1:2]
  A0[j]     <- P[1, 1]
  SEA0[j]   <- P[1, 2]
  kobs[j]   <- P[2, 1]
  SEkobs[j] <- P[2, 2]
}
ExTab <- cbind(SS, A0, SEA0, kobs, SEkobs)
write_clip(ExTab)
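To carry out the second-stage fit from the question with the looped results (dropping the final [100] column, as in the original analysis), something along these lines should work; conI and the starting values are copied from the question:
conI  <- c(0, 0, 0, 0.5, 1.5, 4, 12, 35)
kobsA <- kobs[seq_along(conI)]        # first eight fits; the excluded [100] column is dropped
kFit  <- nls(kobsA ~ k0 + (kin * conI) / (KI + conI),
             start = list(k0 = 0.1, kin = 0.2, KI = 3))
summary(kFit)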

skeleton in pomp is giving error (unused argument)

I have written the following code in R using pomp. Everything works fine except the skeleton function. The error complains about an unused argument for skel, and the compiler warns that the skeleton function returns void. I am adding data as an example. Help is solicited and thanks in advance. The error is like this:
C:/Users/admin/AppData/Local/Temp/RtmpmEz0jw/1892/pomp_dfb36bbab4b5ee3a8b4cbc45a0b63e0c.c: In function '__pomp_skelfn':
C:/Users/admin/AppData/Local/Temp/RtmpmEz0jw/1892/pomp_dfb36bbab4b5ee3a8b4cbc45a0b63e0c.c:96:39: warning: 'return' with a value, in function returning void
return(I);
t y1 y2
1 -0.058886245 0.018451378
2 -0.045854389 -0.012812011
3 0 -0.025850852
4 -0.06009386 0.021278254
5 0.053712929 -0.008261274
6 0 0.025270473
7 0.047628037 -0.008357995
8 0.061425641 -0.0146046
9 0 -0.03778035
10 0 -0.004632415
library(magrittr)
library(pomp)
library(ggplot2)
data <- read.csv("C:/Users/admin/Documents/phd/book2.csv", stringsAsFactors = FALSE)
data <- as.data.frame(data)
pomp(
data,times="t", t0=0,
rmeasure=Csnippet("
y1 = rnorm(x1+x2,sigma1);
y2 = rnorm(x2-x1,sigma2);
"),
dmeasure=Csnippet("
lik = dnorm(y1,x1+x2,sigma1,1)+dnorm(y2,x2-x1,sigma2,1);
lik = (give_log) ? lik : exp(lik);
"),
rprocess=discrete.time.sim(
step.fun=Csnippet("
double tx1, tx2;
tx1 = rnorm(a11*x1 + a12*x2,nu1);
tx2 = rnorm(a21*x1 + a22*x2,nu2);
x1 = tx1; x2 = tx2;
"),
delta.t=1),
initializer=Csnippet("
x1 = 0;
x2 = 0;
"),
skeleton = vectorfield(
skel = Csnippet("double I;
I = a11 + a12*x1 + a22 + a21*x2;
return(I);
")
),
toEstimationScale = logtrans <- Csnippet("
Tr1 = log(a11);
TK1 = log(a21);
Tsigma1 = log(sigma1);
"),
fromEstimationScale = exptrans <- Csnippet("
Tr1 = exp(a11);
TK1 = exp(21);
Tsigma1 = exp(sigma1);
"),
statenames=c("x1","x2"),
paramnames=c("a11","a12","a21","a22","sigma1","sigma2","nu1","nu2"),
params=c(a11=0.5,a12=-0.1,a21=0.2,a22=-1,nu1=0.3,nu2=0.1,sigma1=0.1,sigma2=0.3)
) -> parus
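For what it's worth, a pomp skeleton Csnippet is not supposed to return a value: it assigns the skeleton of each state variable to a variable named D followed by the state name, and vectorfield() takes the Csnippet as its (unnamed) first argument rather than through skel =, which would explain both messages. A hedged sketch of just that part of the call; the right-hand sides below are purely illustrative, not a claim about what the model should be:
skeleton = vectorfield(
  Csnippet("
    Dx1 = a11*x1 + a12*x2;   // skeleton component for state x1 (illustrative)
    Dx2 = a21*x1 + a22*x2;   // skeleton component for state x2 (illustrative)
  ")
),
Since the rprocess above is a discrete-time simulator, map(Csnippet(...), delta.t = 1) may be the more natural wrapper than vectorfield().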

How to optimize a non-linear objective function with non-linear constraints in R?

Say, there is a non-linear objective function :
Z = a1 + b1 * ln(x1) + a2 + b2 * ln(x2), with the objective of maximizing Z,
subject to the following constraints:
x1 + x2 + x3 >= R1
x1 + x2 + x3 <= R2
a1 + b1 * ln(x1) >= R3
How can the objective function be optimized in R? I tried the 'Rsolnp' package available in R, but I am not sure how to frame the constraints and the objective function to pass to the solnp function in that package.
Can anyone help me with this?
Try this (you may want to use other algorithms like NLOPT_LD_MMA with jacobian specified):
library(nloptr)
a1 <- b1 <- 1
a2 <- b2 <- 1
R1 <- R2 <- 1
R3 <- 25
eval_f1 <- function(x, a1, b1, a2, b2, R1, R2, R3) {
  return(-a1 - b1 * log(x[1]) - a2 - b2 * log(x[2]))  # negated, since nloptr minimizes
}
eval_g1 <- function(x, a1, b1, a2, b2, R1, R2, R3) {
  # nloptr expects inequality constraints in the form g(x) <= 0
  return(rbind(R1 - x[1] - x[2] - x[3],        # x1 + x2 + x3 >= R1
               x[1] + x[2] + x[3] - R2,        # x1 + x2 + x3 <= R2
               R3 - a1 - b1 * log(x[1])))      # a1 + b1*ln(x1) >= R3
}
nloptr(x0 = c(1, 1, 1),
       eval_f = eval_f1,
       lb = c(1, 1, 1),
       ub = c(5, 5, 5),
       eval_g_ineq = eval_g1,
       opts = list("algorithm" = "NLOPT_LN_COBYLA"),
       a1 = a1, b1 = b1, a2 = a2, b2 = b2,
       R1 = R1, R2 = R2, R3 = R3)
#Call:
#nloptr(x0 = c(1, 1, 1), eval_f = eval_f1, lb = c(1, 1, 1), ub = c(5,
# 5, 5), eval_g_ineq = eval_g1, opts = list(algorithm = "NLOPT_LN_COBYLA"),
#a1 = a1, b1 = b1, a2 = a2, b2 = b2, R1 = R1, R2 = R2, R3 = R3)
#Minimization using NLopt version 2.4.0
#NLopt solver status: 5 ( NLOPT_MAXEVAL_REACHED: Optimization stopped because #maxeval (above) was reached. )
#Number of Iterations....: 100
#Termination conditions: relative x-tolerance = 1e-04 (DEFAULT)
#Number of inequality constraints: 3
#Number of equality constraints: 0
#Current value of objective function: -5.08783644210816
#Current value of controls: 5 4.385916 2.550764
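Following up on the NLOPT_LD_MMA remark above: the gradient-based algorithms additionally need the gradient of the objective and the Jacobian of the inequality constraints, passed via eval_grad_f and eval_jac_g_ineq. A minimal sketch reusing the constants and the g(x) <= 0 convention above (a sketch, not a definitive implementation):
eval_grad_f1 <- function(x, a1, b1, a2, b2, R1, R2, R3) {
  c(-b1 / x[1], -b2 / x[2], 0)                   # gradient of the negated objective
}
eval_jac_g1 <- function(x, a1, b1, a2, b2, R1, R2, R3) {
  rbind(c(-1, -1, -1),                           # gradient of R1 - x1 - x2 - x3
        c( 1,  1,  1),                           # gradient of x1 + x2 + x3 - R2
        c(-b1 / x[1], 0, 0))                     # gradient of R3 - a1 - b1*ln(x1)
}
nloptr(x0 = c(1, 1, 1),
       eval_f = eval_f1, eval_grad_f = eval_grad_f1,
       lb = c(1, 1, 1), ub = c(5, 5, 5),
       eval_g_ineq = eval_g1, eval_jac_g_ineq = eval_jac_g1,
       opts = list("algorithm" = "NLOPT_LD_MMA", "xtol_rel" = 1e-6),
       a1 = a1, b1 = b1, a2 = a2, b2 = b2,
       R1 = R1, R2 = R2, R3 = R3)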
