I'm porting some code from MATLAB to R and I'm running into difficulties with a particular use of function handles. This is my MATLAB code:
function Application_ChFun
clear;close all;clc;warning('off');
StepsYr = 10;
%% --parameters-- %%
S0 = 1;
r = 0.0;
t0 = 0;
T2 = 5;
gamma = 0.5;
kappa = 0.3;
rho = -0.6;
vBar = 0.05;
v0 = 0.04;
NoOfPaths = 5e4;
NoOfSteps = StepsYr*T2;
%% --Define model-- %%
cf = @(u,T)ChFun(u, T, kappa,vBar,gamma,rho, v0, r);
Vc = @(t,x)MktFun(cf,t,x,log(S0));
% Define bump size
bump_T = 1e-4;
bump_K = @(T)1e-4;
% Define derivatives
dC_dT = @(T,K) (Vc(T + bump_T,K) - Vc(T ,K)) / bump_T;
dC_dK = @(T,K) (Vc(T,K + bump_K(T)) - Vc(T,K - bump_K(T))) / (2 * bump_K(T));
d2C_dK2 = @(T,K) (Vc(T,K + bump_K(T)) + Vc(T,K-bump_K(T)) - 2*Vc(T,K)) / bump_K(T)^2;
t = t0;
S = S0+zeros(NoOfPaths,1);
for i = 1:NoOfSteps
if i==1
t_adj = 1/NoOfSteps;
t = t_adj;
end
% AAA perfectly matches with the R equivalent, but AAB and AAC do not.
AAA = dC_dT(t,S);
AAB = dC_dK(t,S);
AAC = d2C_dK2(t,S);
end
function value = MktFun(cf,T,x,x0)
value = CM_Proxy(cf,T,x,x0);
function value = CM_Proxy(ChF,T,K,x0)
K(K<1e-5)=1e-5;
alpha = 0.75;
c = 3e2;
N_CM = 2^12;
eta = c/N_CM;
b = pi/eta;
u = [0:N_CM-1]*eta;
lambda = 2*pi/(N_CM*eta);
i = complex(0,1);
u_new = u-(alpha+1)*i;
cf = exp(i*u_new*x0).*ChF(u_new,T);
psi = cf./(alpha^2+alpha-u.^2+i*(2*alpha+1)*u);
SimpsonW = 3+(-1).^[1:N_CM]-[1,zeros(1,N_CM-1)];
SimpsonW(N_CM) = 0;
SimpsonW(N_CM-1) = 1;
FFTFun = exp(i*b*u).*psi.*SimpsonW;
payoff = real(eta*fft(FFTFun)/3);
strike = exp(-b:lambda:b-lambda);
payoff_specific = spline(strike,payoff,K);
value = exp(-log(K)*alpha).*payoff_specific/pi;
function cf=ChFun(u, tau, kappa,vBar,gamma,rho, v0, r)
i = complex(0,1);
D_1 = sqrt(((kappa -i*rho*gamma.*u).^2+(u.^2+i*u)*gamma^2));
g = (kappa- i*rho*gamma*u-D_1)./(kappa-i*rho*gamma*u+D_1);
C = (1/gamma^2)*(1-exp(-D_1*tau))./(1-g.*exp(-D_1*tau)).*(kappa-gamma*rho*i*u-D_1);
A = i*u*r*tau + kappa*vBar*tau/gamma^2 * (kappa-gamma*rho*i*u-D_1)-2*kappa*vBar/gamma^2*log((1-g.*exp(-D_1*tau))./(1-g));
cf = exp(A + C * v0);
where MktFun is an ordinary function. In MATLAB, when g = dC_dK(t,S) is called, bump_K(T) is evaluated first, and then Vc(T, K + bump_K(T)) and Vc(T, K - bump_K(T)).
In R I have the following:
Application_ChFun <- function(){
StepsYr = 10
## --parameters-- ##
S0 = 1
r = 0.0
t0 = 0
T2 = 5
gamma = 0.5
kappa = 0.3
rho = -0.6
vBar = 0.05
v0 = 0.04
NoOfPaths = 5e4
NoOfSteps = StepsYr*T2
## --Define model-- ##
cf <- function(u,T) ChFun(u,T,kappa,vBar,gamma,rho, v0, r)
Vc <- function(t,x) MktFun(cf,t,x,log(S0))
# Define bump size
bump_T = 1e-4
bump_K <- function(T) 1e-4
# Define derivatives
dC_dT <- function(T,K) (Vc(T + bump_T,K) - Vc(T ,K)) / bump_T
dC_dK <- function(T,K) (Vc(T,K + bump_K(T)) - Vc(T,K - bump_K(T))) / (2 * bump_K(T))
d2C_dK2 <- function(T,K) (Vc(T,K + bump_K(T)) + Vc(T,K - bump_K(T)) - 2*Vc(T,K)) / bump_K(T)^2
t = t0
S = S0+rep(0,NoOfPaths)
for (i in 1:NoOfSteps){
t_real = t
if (i==1){
t_adj = 1/NoOfSteps;
t = t_adj
}
# AAA perfectly matches the MATLAB equivalent, but AAB and AAC do not.
AAA = dC_dT(t,S)
AAB = dC_dK(t,S)
AAC = d2C_dK2(t,S)
}
}
MktFun <- function(cf,T,x,x0){
return(CM_Proxy(cf,T,x,x0))
}
CM_Proxy <- function(ChF,T,K,x0){
K[K<1e-5] = 1e-5
alpha = 0.75
c = 3e2
N_CM = 2^12
eta = c/N_CM
b = pi/eta
u = (0:(N_CM-1))*eta
lambda = 2*pi/(N_CM*eta)
i = complex(real = 0, imaginary = 1)
u_new = u - (alpha+1)*i # European call option.
cf = exp(i*u_new*x0)*ChF(u_new,T)
psi = cf/(alpha^2+alpha-u^2+i*(2*alpha+1)*u)
SimpsonW = 3+(-1)^(1:N_CM)-c(1,rep(0,N_CM-1))
SimpsonW[N_CM] = 0
SimpsonW[N_CM-1] = 1
FFTFun = exp(i*b*u)*psi*SimpsonW
payoff = Re(eta*fft(FFTFun)/3)
strike = exp(seq(-b,b-lambda,lambda))
K = as.vector(K)
payoff_specific = stinepack::stinterp(strike,payoff,K)
value = exp(-log(K)*alpha)*payoff_specific$y/pi
return(value)
}
ChFun <- function(u, tau, kappa,vBar,gamma,rho, v0, r){
i = complex(real = 0, imaginary = 1)
D_1 = sqrt(((kappa - i*rho*gamma*u)^2 + (u^2+i*u)*gamma^2))
g = (kappa - i*rho*gamma*u - D_1) / (kappa - i*rho*gamma*u + D_1)
C = (1/gamma^2)*(1-exp(-D_1*tau))/(1-g*exp(-D_1*tau))*(kappa-gamma*rho*i*u-D_1)
A = i*u*r*tau + kappa*vBar*tau/gamma^2 * (kappa-gamma*rho*i*u-D_1) +
-2*kappa*vBar/gamma^2*log((1-g*exp(-D_1*tau))/(1-g))
cf = exp(A + C * v0)
return(cf)
}
The problem is that in this case g = dC_dK(t,S) calls Vc directly instead of evaluating bump_K(T) first. Could someone please suggest a solution?
The evaluation order of functions is not necessarily inside-out (as you appear to have expected) so much as the order in which values are needed. R evaluates lazily, so if you include an expensive operation whose result is never actually referenced, it is never evaluated.
Take this example:
f1 <- function(a) { message("f1"); a + 1; }
f2 <- function(b) { message("f2"); f1(b) + 2; }
f3 <- function(d) { message("f3"); f2(f1(d) + 3) / f2(f1(d) + 4); }
f3(2)
# f3
# f2
# f1
# f1
# f2
# f1
# f1
# [1] 0.9
When f3 is called, the calls to f2 are the next to be evaluated. When f2 is first called (with f1(d) + 3), that argument is passed unevaluated. Only once f2 tries to use its b is the argument evaluated and f1 called.
If I look at the call stack on the first call to f1, then we see:
Browse[2]> where
where 1 at #1: f1(b)
where 2 at #1: f2(f1(d) + 3)
where 3 at #1: f3(2)
showing the order of functions is f3 called first, then f2, then from there f1.
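If you need the MATLAB-style ordering, you can force an argument to be evaluated on entry with force(). A minimal sketch based on the toy functions above, where the force(b) call is the only change:
f1 <- function(a) { message("f1"); a + 1 }
f2 <- function(b) { force(b); message("f2"); f1(b) + 2 }  # evaluate b before doing anything else
f3 <- function(d) { message("f3"); f2(f1(d) + 3) / f2(f1(d) + 4) }
f3(2)
# f3
# f1   <- the argument is now evaluated before f2's own message
# f2
# f1
# f1
# f2
# f1
# [1] 0.9
The same idea applies to your dC_dK: assigning bk <- bump_K(T) inside the function body (or calling force()) makes the bump evaluate before Vc is called, as in MATLAB, although with a constant bump the numerical result should be unaffected.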
Suppose you have this simulated data:
library(dplyr)  # for %>%, mutate() and select() below
N = 1000; alpha_bar = 0.7; gamma_h = 16; gamma_c = 50
mu_hw = 4; sigma_hw = 0.1; mu_i = 4; sigma_i = 0.5; sigma_eps = 0.01
set.seed(1)
wage <- exp(rnorm(N, mean = mu_hw, sd = sigma_hw))
set.seed(1)
nlincome <- exp(rnorm(N, mu_i, sigma_i))
set.seed(1)
e_ <- rnorm(N, 0, sigma_eps)
obs_data <- data.frame(w = wage,
I = nlincome,
eps = e_) %>%
mutate(alpha = alpha_bar + eps,
h = alpha*gamma_h - ((1-alpha)*(I-gamma_c))/w) %>%
select(w,I,h)
And now assume that you've forgotten these parameters (alpha_bar, gamma_h, gamma_c, sigma_eps). So you want to estimate those by maximum likelihood using the obs_data dataframe.
From the model I am working with, I know that epsilon has this form (solving the hours equation for alpha):
eps = (h + (I - gamma_c)/w) / (gamma_h + (I - gamma_c)/w) - alpha_bar
Furthermore, I have derived the log-likelihood (including the Jacobian term from the change of variables between eps and h):
logLik = - sum_i [ log(sigma_eps * sqrt(2*pi)) + 0.5 * (eps_i / sigma_eps)^2 + log|gamma_h + (I_i - gamma_c)/w_i| ]
Since I have to minimize the negative log-likelihood, I have tried to do it like this:
normal_loglik <- function(par){
g_h <- par[1]; g_c <- par[2]; a_bar <- par[3]; sigma_e <- par[4]
d <- mutate(obs_data,
num = h + ((I-g_c)/w),
den = g_h + ((I-g_c)/w),
eps = (num/den)-a_bar,
arg_1 = 0.5*(eps/sigma_e)^2,
arg_2 = log(abs(den)),
opt = log(sigma_e * sqrt(2*pi)) + arg_1 + arg_2)
LL <- - sum(d$opt)
return(-LL)
}
opt_par <- optim(rep(1e-4,4),
normal_loglik,
method = "SANN")$par
real_param <- c(gamma_h, gamma_c, alpha_bar, sigma_eps)
# Comparison
print(real_param); print(opt_par)
It does run, but the estimates I get are way too far from the real ones.
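One quick diagnostic (not a full answer), a sketch using the objects defined above: compare the objective returned by normal_loglik at the true parameters with the objective at the returned estimates. If the truth scores much better, the optimizer simply never found that region, which seems likely here because SANN started at rep(1e-4, 4) is far from any plausible scale for gamma_h and gamma_c.
# negative log-likelihood at the true parameters vs. at the SANN estimates
normal_loglik(c(gamma_h, gamma_c, alpha_bar, sigma_eps))
normal_loglik(opt_par)
Starting values closer to the scale of the data (and a different optim method) usually behave much better for a smooth objective like this one.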
I am trying to implement the following formula in R, where r0, t, theta0 and alpha are constants and I is a modified Bessel function of the first kind. My issue, I suspect, is somewhere between the sum term and the end of the formula. I truncate the sum at n = 150, since the terms converge to zero quickly, so there is no need to go beyond that. I am using the "Bessel" package.
[Formula 1]
[Formula 2]
[Results to reproduce: first row = t, second row = Defaultcorr in %]
Here is what I have so far. I can't seem to find my mistake. Defaultcorr should be 0.04% when t = 1 (according to the image "Results to reproduce"). To obtain this result, m should be equal to 6.234611709.
V1 = 5
V2 = 5
K1 = 1
K2 = 1
sigma1 = 0.3
sigma2 = 0.3
Z1 = log((V1/K1)/sigma1)
Z2 = log((V2/K2)/sigma2)
t = 1
rho = 0.4
#One firm default -> Firm #1 when lambda = mu
PD_asset1 = 2 * pnorm(-(Z1/sqrt(t)))
PD_asset1
PD_asset2 = 2 * pnorm(-(Z2/sqrt(t)))
PD_asset2
#Results assuming that lambda = mu
#Conditions for alpha, theta0, r0
if (rho < 0) { #alpha
alpha = atan(-(sqrt(1-rho^2)) / rho)
} else {
alpha = pi + atan(-(sqrt(1-rho^2)) / rho)
}
if (rho > 0) { #theta0
theta0 = atan((Z2 * sqrt(1 - rho^2)) / (Z1 - (rho * Z2)))
} else {
theta0 = pi + atan((Z2 * sqrt(1 - rho^2)) / (Z1 - (rho * Z2)))
}
r0 = (Z2 / sin(theta0)) #r0
#Simplified function
h = function(n) {
(sin((n * pi * theta0)/alpha)/n)
}
n = seq(1, 150, 2)
Bessel1 = (besselI(((r0^2)/(4*t)), (0.5*(((n*pi)/alpha) + 1)), FALSE))
Bessel2 = (besselI(((r0^2)/(4*t)), (0.5*(((n*pi)/alpha) - 1)), FALSE))
l = matrix(data = n, ncol = n)
m = apply((h(l)*(Bessel1 + Bessel2)), 2, FUN = sum)
PD_asset1_or_asset2 = 1 - (((2 * r0)/(sqrt(2*pi*t))) * (exp(-(r0^2)/(4*t))) * m)
PD_asset1_or_asset2
Var_asset1 = PD_asset1 * (1 - PD_asset1)
Var_asset1
Var_asset2 = PD_asset2 * (1 - PD_asset2)
Var_asset2
PD_asset1_and_asset2 = PD_asset1 + PD_asset2 - PD_asset1_or_asset2
PD_asset1_and_asset2
Defaultcorr = (PD_asset1_and_asset2 - (PD_asset1 * PD_asset2)) / (sqrt(Var_asset1 * Var_asset2))
Defaultcorr
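For reference, here is a minimal sketch of how I believe the truncated sum is meant to read when written directly on the vector of odd n (same r0, theta0, alpha and t as above), in case it helps isolate where my numbers diverge:
n_odd = seq(1, 150, 2)   # odd n only, truncated at 150 as above
terms = (sin(n_odd * pi * theta0 / alpha) / n_odd) *
        (besselI(r0^2 / (4 * t), 0.5 * (n_odd * pi / alpha + 1)) +
         besselI(r0^2 / (4 * t), 0.5 * (n_odd * pi / alpha - 1)))
m_check = sum(terms)
m_check   # should be about 6.234611709 if the inputs are right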
Any help would be appreciated. Thank you
I am trying to build a table from a while loop. Basically, I want a while loop in which the value of r increases by 1, repeating until the inequality is met. In addition, I want to collect these values in a table with three columns: the value of r, the value of w, and the value of rhs (rounded to 3 decimal places).
```{r}
al = 0.10; n = 30; a = 3; b = 5; r = 2; int = 8; h = (int/2); msE = 19.19
table = function(MSE, V, H, alpha = al, r = 2){
rhs = h^2*r/((V-1)*MSE)
w = qf(alpha, V-1, V*(r-1), lower.tail = FALSE)
g = data.frame(r, round(w, 3), round(rhs, 3))
while(w > rhs){
r = r+1
rhs = h^2*r/((V-1)*MSE)
w = qf(alpha, V-1, V*(r-1), lower.tail = FALSE)
g = data.frame(r, round(w, 3), round(rhs, 3))
}
rbind(g)
}
table(MSE = msE, V = a*b, H = h)
```
I figured it would go something like this, but this only prints out the last value of r before the loop ends (it ends at 26), which results in a "table" that only has one row. I would like a table with 24 rows (since it starts at r = 2).
Any help would be appreciated!
Perhaps this might help:
al = 0.10; n = 30; a = 3; b = 5; r = 2; int = 8; h = (int/2); msE = 19.19
table = function(MSE, V, H, alpha = al, r = 2){
rhs = h^2*r/((V-1)*MSE)
w = qf(alpha, V-1, V*(r-1), lower.tail = FALSE)
g = data.frame(r, round(w, 3), round(rhs, 3))
gn = data.frame(r, round(w, 3), round(rhs, 3))
while(w > rhs){
r = r+1
rhs = h^2*r/((V-1)*MSE)
w = qf(alpha, V-1, V*(r-1), lower.tail = FALSE)
g = data.frame(r, round(w, 3), round(rhs, 3))
gn <- rbind(gn,g)
}
return(gn)
}
table(MSE = msE, V = a*b, H = h)
A slightly different approach, eliminating the need for an interim data frame and for rbind(). Commented in the code.
# your parameters
al <- 0.10; n <- 30; a <- 3; b <- 5; int <- 8; h <- (int/2); msE <- 19.19
# your function definition (name changed to avoid confusion / conflict with existing R function)
tabula <- function(MSE, V, H, alpha = al, r = 2)
{
g <- data.frame( N = 0, W = 1, RHS = 0 ) # initiate data frame, values set
# so that the while condition is met
# the while function populates the data frame cell by cell,
# eliminating the need for an interim data.frame and rbind()
while( g[ r - 1, "W" ] > g[ r - 1, "RHS" ] ) # check condition in the last data frame row
{ # write values in a new row
g[ r, "N" ] <- r
g[ r, "W" ] <- round( qf( alpha, V - 1, V * ( r - 1 ), lower.tail = FALSE ), 3 )
g[ r, "RHS" ] <- round( h^2 * r / ( ( V - 1 ) * MSE ), 3 )
r <- r + 1 # increment row counter
}
return( g[ -1, ] ) # return the data frame, removing the initial row
}
tabula( MSE = msE, V = a * b, H = h )
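As a further variation, a vectorized sketch using the same globals as above (al, a, b, h, msE): compute all candidate rows at once and keep everything up to the first r that satisfies the inequality, which avoids the loop entirely.
rs  <- 2:100                                          # assumed generous upper limit on r
w   <- qf(al, a*b - 1, a*b*(rs - 1), lower.tail = FALSE)
rhs <- h^2 * rs / ((a*b - 1) * msE)
tab <- data.frame(r = rs, w = round(w, 3), rhs = round(rhs, 3))
tab[seq_len(which(w <= rhs)[1]), ]                    # rows up to the stopping value of r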
I am trying to program the basic Vanilla TrueSkill (3.1) algorithm in R but am getting some strange results.
My code is the following:
# A simple test between two players repeatedly playing one another
betaSq = 0.1
obs = 10000
p1_skills = 0.333
p2_skills = 0
p1_draws = rnorm(obs, p1_skills, sqrt(betaSq))
p2_draws = rnorm(obs, p2_skills, sqrt(betaSq))
p1_pred_mu = rep(NA, obs+1)
p1_pred_sigmaSq = rep(NA, obs+1)
p2_pred_mu = rep(NA, obs+1)
p2_pred_sigmaSq = rep(NA, obs+1)
# Initial values
p1_pred_mu[1] = 0
p1_pred_sigmaSq[1] = 1
p2_pred_mu[1] = 0
p2_pred_sigmaSq[1] = 1
results = p1_draws > p2_draws
probs = rep(NA, obs)
# Run TrueSkill
for (i in seq(2,obs+1)) {
probs[i-1] = predictProb(p1_pred_mu[i-1], p1_pred_sigmaSq[i-1], p2_pred_mu[i-1], p2_pred_sigmaSq[i-1], betaSq)
out = updateSkill(p1_pred_mu[i-1], p1_pred_sigmaSq[i-1], p2_pred_mu[i-1], p2_pred_sigmaSq[i-1], betaSq, results[i-1])
# Now update based on the out
p1_pred_mu[i] = out$mu1
p1_pred_sigmaSq[i] = out$sigmaSq1
p2_pred_mu[i] = out$mu2
p2_pred_sigmaSq[i] = out$sigmaSq2
}
# Output results
dev.new()
mu = p1_pred_mu
lower = qnorm(0.05, p1_pred_mu, p1_pred_sigmaSq)
upper = qnorm(0.95, p1_pred_mu, p1_pred_sigmaSq)
plot(mu, ylim = c(min(lower), max(upper)), main = "p1")
lines(lower)
lines(upper)
dev.new()
mu = p2_pred_mu
lower = qnorm(0.05, p2_pred_mu, p2_pred_sigmaSq)
upper = qnorm(0.95, p2_pred_mu, p2_pred_sigmaSq)
plot(mu, ylim = c(min(lower), max(upper)), main = "p2")
lines(lower)
lines(upper)
a = filter(probs, rep(1, 20))/20
dev.new()
plot(a)
print(sprintf("Mean p1: %g", mean(p1_pred_mu)))
print(sprintf("Mean p2: %g", mean(p2_pred_mu)))
print(sprintf("Mean results: %g", mean(results)))
print(sprintf("Mean predicted results: %g", mean(probs)))
The functions that are called are:
# Functions
updateSkill <- function(mu1, sigmaSq1, mu2, sigmaSq2, betaSq, result) {
# http://papers.nips.cc/paper/3331-trueskill-through-time-revisiting-the-history-of-chess.pdf
c = 2*betaSq + sigmaSq1 + sigmaSq2
if (result == 1) {
# Player 1 wins
v = dnorm((mu1-mu2)/c)/pnorm((mu1-mu2)/c)
w = v*(v+(mu1-mu2)/c)
mu1 = mu1 + (sigmaSq1/c)*v
mu2 = mu2 - (sigmaSq2/c)*v
sigmaSq1 = sigmaSq1 * sqrt(1 - (sigmaSq1/c^2)*w)
sigmaSq2 = sigmaSq2 * sqrt(1 - (sigmaSq2/c^2)*w)
} else if (result == 0) {
# Player 2 wins
v = dnorm((mu2-mu1)/c)/pnorm((mu2-mu1)/c)
w = v*(v+(mu2-mu1)/c)
mu1 = mu1 - (sigmaSq1/c)*v
mu2 = mu2 + (sigmaSq2/c)*v
sigmaSq1 = sigmaSq1 * sqrt(1 - (sigmaSq1/c^2)*w)
sigmaSq2 = sigmaSq2 * sqrt(1 - (sigmaSq2/c^2)*w)
}
return(list(mu1=mu1, mu2=mu2, sigmaSq1=sigmaSq1, sigmaSq2=sigmaSq2))
}
predictProb <- function(mu1, sigmaSq1, mu2, sigmaSq2, betaSq) {
# Try to predict the probability of player 1 beating player 2 using Trueskill model
mean1 = mu1
mean2 = mu2
var1 = sigmaSq1 + betaSq
var2 = sigmaSq2 + betaSq
# Now the dist of player1 - player2 is N(mean1 - mean2, sqrt(var1 + var2))
prob1Wins = pnorm(0, mean2 - mean1, sqrt(var1 + var2))
return(prob1Wins)
}
I hate to post such a massive blob of code, but I really cannot figure out where things are going wrong.
The program runs and the predicted skills (distributed as N(mu, sigma)) converge. However, the predicted probabilities it gives are not converging to the true probabilities of the results!
A sample output is:
[1] "Mean p1: 0.0762161"
[1] "Mean p2: -0.0762161"
[1] "Mean results: 0.7733"
[1] "Mean predicted results: 0.631424"
Any idea what is going wrong?
The reason this didn't work is that the third line of the updateSkill function should read
c = sqrt(2*betaSq + sigmaSq1 + sigmaSq2)
not
c = 2*betaSq + sigmaSq1 + sigmaSq2
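As a quick sanity check of the fix (a sketch using the parameters from the question): with true skills 0.333 and 0 and betaSq = 0.1, the single-game win probability for player 1 is
pnorm((0.333 - 0) / sqrt(2 * 0.1))   # ~0.77, in line with "Mean results: 0.7733"
so after the correction the mean predicted probability should drift toward roughly 0.77 instead of 0.63.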
Lately I have had a problem with R and RStudio. The following script cannot run to completion in RStudio; it is often interrupted. However, it runs successfully in the plain R console. Here is the script:
library(MASS)
simulation <- function(index) {
n = 800 #100 200 400 800 1600 3200
p = 0.5 * n #0.5*n 0.8*n 1.2*n
delta = 0.5 #0.5
k0 = 1
y = array(0, dim = c(p, n))
# simulation1 r=3
# ---------------------------------------------------------
r = 4
A = array(runif(p * r, min = -1, max = 1), dim = c(p, r))
x = array(0, dim = c(r, n)) #x = array(0, dim = c(p, n))
epsilon = array(t(mvrnorm(n, mu = rep(0, p), Sigma = diag(1, p, p))),
dim = c(p, n))
e = array(t(mvrnorm(n, mu = rep(0, r), Sigma = diag(1, r, r))), dim = c(r,
n))
Pi = diag(c(0.6, -0.5, 0.3, 0.6), r, r)
for (i in 2:n) {
x[, i] = Pi %*% x[, i - 1, drop = FALSE] + e[, i]
}
A[, 2] = A[, 2]/p^(delta/2) # weak factor
y1 = A %*% x + epsilon
y = t(scale(t(y1), center = T, scale = FALSE))
Mhat = array(0, dim = c(p, p))
for (i in 1:k0) {
covk = y[, (i + 1):n, drop = FALSE] %*% t(y[, 1:(n - i), drop = FALSE])/(n -
i)
Mhat = Mhat + covk %*% t(covk)
}
R = p/2
ratio = array(0, dim = c(R, 1))
temp = eigen(Mhat)
value = temp$values
vector = temp$vectors
for (i in 1:R) {
ratio[i] = value[i + 1]/value[i]
}
plot(ratio, type = "l")
rhat1 = which.min(ratio)
Ahat1 = vector[, 1:rhat1, drop = FALSE]
# two step
ystar = y1 - Ahat1 %*% t(Ahat1) %*% y1
ystar = t(scale(t(ystar), center = T, scale = FALSE))
Mhatstar = array(0, dim = c(p, p))
for (i in 1:k0) {
covk = ystar[, (i + 1):n, drop = FALSE] %*% t(ystar[, 1:(n - i),
drop = FALSE])/(n - i)
Mhatstar = Mhatstar + covk %*% t(covk)
}
temp1 = eigen(Mhatstar)
valuestar = temp1$values
vector1 = temp1$vectors
ratiostar = array(0, dim = c(R, 1))
for (i in 1:R) {
ratiostar[i] = valuestar[i + 1]/valuestar[i]
}
plot(ratiostar, type = "l")
rhat2 = which.min(ratiostar)
# Ahat2 = vector1[, 1:rhat2, drop = FALSE]
rhat = rhat1 + rhat2
# Ahat = cbind2(Ahat1, Ahat2)
return(rhat)
}
result=replicate(200,simulation())
The program is often interrupted, and I do not know why.
My desktop: Windows 7
RStudio version: 0.98.426
The log file shows:
15 Oct 2013 11:02:50 [rsession-Administrator] WARNING Abort requested; LOGGED FROM: bool session::connection::checkForAbort(boost::shared_ptr<session::HttpConnection>, boost::function<void()>) C:\Users\Administrator\rstudio\src\cpp\session\http\SessionHttpConnectionUtils.cpp:146
15 Oct 2013 11:03:00 [rsession-Administrator] ERROR r error 5 (R symbol not found) [symbol (option)=ggvis.renderer]; OCCURRED AT: T r::options::getOption(const std::string&, const T&) [with T = std::basic_string<char, std::char_traits<char>, std::allocator<char> >] C:\Users\Administrator\rstudio\src\cpp\r\include/r/ROptions.hpp:73; LOGGED FROM: T r::options::getOption(const std::string&, const T&) [with T = std::basic_string<char, std::char_traits<char>, std::allocator<char> >] C:\Users\Administrator\rstudio\src\cpp\r\include/r/ROptions.hpp:75