Condition for parameters in a function with optimx in R

Hi, I made a piecewise linear regression in R, as below.
library(optimx)

x <- ques$Day
y <- ques$cells_L

fun <- function(par, x){
  y1 <- x^0 * par["i1"]
  y1[x >= par["x3"]] <- par["i3"]
  r1 <- x > par["x1"] & x < par["x2"]
  r2 <- x >= par["x2"] & x < par["x3"]
  y1[r1] <- par["i1"] + (par["i2"] - par["i1"])/(par["x2"] - par["x1"]) * (x[r1] - par["x1"])
  y1[r2] <- par["i2"] + (par["i3"] - par["i2"])/(par["x3"] - par["x2"]) * (x[r2] - par["x2"])
  y1
}

# sum of squared residuals
SSR <- function(par) {
  sum((y - fun(par, x))^2)
}

ls20 <- optimx(par = c(x1 = 100, x2 = 150, x3 = 200, i1 = 0, i2 = 6, i3 = 0),
               fn = SSR, method = "Nelder-Mead")

plot(ques$Day, ques$cells_L, col = "black", pch = 16)
lines(1:366,
      fun(c(x1 = ls20$x1, x2 = ls20$x2, x3 = ls20$x3,
            i1 = ls20$i1, i2 = ls20$i2, i3 = ls20$i3), 1:366), col = "red")
[result plot]
It works well, with the result shown in the plot above.
However, I want to restrict the range of [i3], the third intercept value, to lie below the value of [i2].
Where do I put the condition on the range of [i3]?
[sketch of the desired fit]
I want a result like the blue line.
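One common way to impose such a constraint with a derivative-free method like Nelder-Mead (a sketch, not from the original post; SSR.constrained is a name introduced here) is to have the objective return a large penalty whenever the constraint is violated:
# Sketch: reject any parameter vector with i3 >= i2 by returning a huge
# objective value, so Nelder-Mead never settles there.
SSR.constrained <- function(par) {
  if (par["i3"] >= par["i2"]) return(1e12)  # penalty for violating i3 < i2
  sum((y - fun(par, x))^2)
}
ls20 <- optimx(par = c(x1 = 100, x2 = 150, x3 = 200, i1 = 0, i2 = 6, i3 = 0),
               fn = SSR.constrained, method = "Nelder-Mead")
An alternative is to reparameterize: optimize over a free parameter d3 and compute i3 as i2 - exp(d3) inside fun, which enforces i3 < i2 by construction.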

Related

How can I use tryCatch for an nls function in R

I am doing a regression for a quadratic-linear function. I have two options: nlsLM and nls2. However, for some datasets the use of nlsLM causes problems such as "singular gradient matrix at initial parameter estimates", or it runs into an infinite loop. I want to use tryCatch to deal with this issue. Can anyone help me out? Thanks everyone in advance.
Here is the full code:
# Packages needed for estimation of Ideal trajectory - nonlinear regression
#-------------------------------------------------------------------------------
library("minpack.lm")
library("nlstools")
library("nlsMicrobio")
library("stats")
library("tseries") #runs test for auto correlation
#Use NLS2
library(proto)
library(nls2)
################################################################
# Set working directory
setwd("C:/Users/Kevin Le/PycharmProjects/Pig Data Black Box - Copy")
#load dataset
load("Data/JRPData_TTC.Rdata") #load dataset created in MissingData.step
ID <- 5470
#Create a new dataframe which will store Data after ITC estimation
#Dataframe contains ITC parameters
ITC.param.pos2 <- data.frame(ANIMAL_ID=factor(),
X0=double(),
Y1=double(),
Y2=double(),
Ylast=double(),
a=double(),
b=double(),
c=double(),
d=double(),
stringsAsFactors=FALSE)
#Dataframe contains data points on the ITC
Data.remain <- data.frame(ANIMAL_ID=character(),
Age=double(),
obs.CFI=double(),
tt=double(),
ttt=double(),
stringsAsFactors=FALSE)
#===============================================================
# For loop for automatically estimating ITC of all pigs
#===============================================================
IDC <- seq_along(ID) # 17, 23, 52, 57, 116
for (idc in IDC){
# idc = 1
i <- ID[idc]
Data <- No.NA.Data.1[No.NA.Data.1$ANIMAL_ID == i,]
idc1 <- unique(as.numeric(Data$idc.1))
####### Create data frame of x (Age) and y (CFI) ########
x <- as.numeric(Data$Age.plot)
Y <- as.numeric(Data$CFI.plot)
Z <- as.numeric(Data$DFI.plot)
Data.xy <- as.data.frame(cbind(x,Y))
#Initial parameters for parameter estimation
X0.0 <- x[1]
Xlast <- x[length(x)]
##################################################################
# 1. reparametrization CFI at X0 = 0
#function used for reparametrization in MAPLE
# solve({
# 0=a+b*X_0+c*X_0**2,
# DFIs=b+2*c*Xs,CFIs=a+b*Xs+c*Xs**2},
# {a,b,c});
# a = -X0*(2*CFIs*Xs-CFIs*X0-Xs^2*DFIs+Xs*DFIs*X0)/(Xs^2-2*X0*Xs+X0^2)
# b = (-Xs^2*DFIs+DFIs*X0^2+2*CFIs*Xs)/(Xs^2-2*X0*Xs+X0^2)
# c = -(CFIs-Xs*DFIs+X0*DFIs)/(Xs^2-2*X0*Xs+X0^2)
# 2. with the source of the function abcd and pred
##################################################################
#Provide set of initial parameters
Xs.1 <- round(seq(X0.0 + 1, Xlast - 1, len = 30), digits = 0)
X0.1 <- rep(X0.0, length(Xs.1))
DFIs.1 <- NULL
CFIs.1 <- NULL
for(A in seq_along(Xs.1)){
DFIs2 <- Data[Data$Age.plot == Xs.1[A],]$DFI.plot
CFIs2 <- Data[Data$Age.plot == Xs.1[A],]$CFI.plot
DFIs.1 <- c(DFIs.1, DFIs2)
CFIs.1 <- c(CFIs.1, CFIs2)
}
st1 <- data.frame(cbind(X0.1, Xs.1, DFIs.1, CFIs.1))
names(st1) <- c("X0","Xs", "DFIs","CFIs")
#RUN NLS2 to find optimal initial parameters
st2 <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
start = st1,
# weights = weight,
# trace = T,
algorithm = "brute-force")
par_init <- coef(st2); par_init
#--------------------------------------------
# Create empty lists to store data after loop
#--------------------------------------------
par <- list()
AC.res <- list()
AC.pvalue <- NULL
data2 <- list()
data3 <- list()
param <- data.frame(rbind(par_init))
par.abcd <- data.frame(rbind(abcd.2(as.vector(par_init))))
param.2 <- data.frame(X0=double(),
Xs=double(),
DFIs=double(),
CFIs=double(),
a=double(),
b=double(),
c=double(),
stringsAsFactors=FALSE)
j <- 2
AC_pvalue <- 0
AC.pvalue[1] <- AC_pvalue
datapointsleft <- as.numeric(dim(Data)[1])
dpl <- datapointsleft #vector of all datapointsleft at each step
#-------------------------------------------------------------------------------
# Start the procedure of Non Linear Regression
#-------------------------------------------------------------------------------
while ((AC_pvalue<=0.05) && datapointsleft >= 20){
weight <- 1/Y^2
# ---------------- NON linear reg applied to log(Y) ---------------------------------
st2 <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
start = st1,
weights = weight,
trace = F,
algorithm = "brute-force")
par_init <- coef(st2)
par_init
# st1 <- st1[!(st1$Xs == par_init[2]),]
nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
start = list(X0 = par_init[1], Xs = par_init[2],
DFIs = par_init[3], CFIs = par_init[4]),
weights = weight,
algorithm = "port",
lower = c(-10000,X0.0+1, -10000, -10000),
upper = c(10000, Xlast-1, 10000, 10000),
trace = F)
# nls.CFI <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
# Data.xy,
# start = list(X0 = par_init[1], Xs = par_init[2],
# DFIs = par_init[3], CFIs = par_init[4]),
# weights = weight,
# control = nls.control(warnOnly = TRUE),
# trace = T,
# algorithm = "port",
# lower = c(-100000000,X0.0+1, -1000000000, -1000000000),
# upper = c(1000000000, Xlast-1, 1000000000, 1000000000))
# nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
# Data.xy,
# control = nls.control(warnOnly = TRUE),
# start = list(X0 = par_init[1], Xs = par_init[2],
# DFIs = par_init[3], CFIs = par_init[4]),
# weights = weight,
# algorithm = "port",
# lower = c(-1000000000,X0.0+1, -1000000000, -1000000000),
# upper = c(1000000000, Xlast-1, 1000000000, 1000000000),
# trace = F)
#--------RESULTS analysis GOODNESS of fit
#estimate params
par[[j]] <- coef(nls.CFI)
par.abcd[j,] <- abcd.2(as.vector(coef(nls.CFI) )) #calculation of a, b, c and d
param[j,] <- par[[j]]
param.2[j-1,] <- cbind(param[j,], par.abcd[j,])
#summary
# summ = overview((nls.CFI)) #summary
#residuals
res1 <- nlsResiduals(nls.CFI) #residuals
res2 <- nlsResiduals(nls.CFI)$resi1
res <- res2[, 2]
AC.res <- test.nlsResiduals(res1)
AC.pvalue[j] <- AC.res$p.value
#---------Check for negative residuals----------
#Add filtration step order to data
Step <- rep(j - 1, length(x))
#create a new dataset with predicted CFI included
Data.new <- data.frame(cbind(x, Z, Y, pred.func.2(par[[j]],x)[[1]], res, Step))
names(Data.new) <- c("Age", "Observed_DFI","Observed_CFI", "Predicted_CFI", "Residual", "Step")
# plot(Data.new$Age, Data.new$Predicted_CFI, type = "l", col = "black",lwd = 2,
# ylim = c(0, max(Data.new$Predicted_CFI, Data.new$Observed_CFI)))
# lines(Data.new$Age, Data.new$Observed_CFI, type = "p", cex = 1.5)
#
#remove negative res
Data.pos <- Data.new[!Data.new$Residual<0,]
# lines(Data.pos$Age, Data.pos$Predicted_CFI, type = "l", col = j-1, lwd = 2)
# lines(Data.pos$Age, Data.pos$Observed_CFI, type = "p", col = j, cex = 1.5)
#restart
#Criteria to stop the loop when the estimated parameters are equal to initial parameters
# Crite <- sum(param.2[dim(param.2)[1],c(1:4)] == par_init)
datapointsleft <- as.numeric(dim(Data.pos)[1])
par_init <- par[[j]]
AC_pvalue <- AC.pvalue[j]
j <- j+1
x <- Data.pos$Age
Y <- Data.pos$Observed_CFI
Z <- Data.pos$Observed_DFI
Data.xy <- as.data.frame(cbind(x,Y))
dpl <- c(dpl, datapointsleft)
dpl
#Create again the grid
X0.0 <- x[1]
Xlast <- x[length(x)]
#Xs
if(par_init[2] -15 <= X0.0){
Xs.1 <- round(seq(X0.0 + 5, Xlast - 5, len = 30), digits = 0)
} else if(par_init[2] + 5 >= Xlast){
Xs.1 <- round(seq(par_init[2]-10, par_init[2]-1, len = 6), digits = 0)
} else{
Xs.1 <- round(seq(par_init[2]-5, par_init[2] + 5, len = 6), digits = 0)
}
#
X0.1 <- rep(X0.0, length(Xs.1))
DFIs.1 <- NULL
CFIs.1 <- NULL
for(A in seq_along(Xs.1)){
DFIs2 <- Data[Data$Age.plot == Xs.1[A],]$DFI.plot
CFIs2 <- Data[Data$Age.plot == Xs.1[A],]$CFI.plot
DFIs.1 <- c(DFIs.1, DFIs2)
CFIs.1 <- c(CFIs.1, CFIs2)
}
st1 <- data.frame(cbind(X0.1, Xs.1, DFIs.1, CFIs.1))
if(X0.0 <= par_init[2] && Xlast >=par_init[2]){
st1 <- rbind(st1, par_init)
}
names(st1) <- c("X0","Xs", "DFIs","CFIs")
}
} # end FOR loop
Here is the data file; I have exported my data to .Rdata for easier import: https://drive.google.com/file/d/1GVMarNKWMEyz-noSp1dhzKQNtu2uPS3R/view?usp=sharing
In this file, the set with ID 5470 produces the error "singular gradient matrix at initial parameter estimates" in this part:
nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
start = list(X0 = par_init[1], Xs = par_init[2],
DFIs = par_init[3], CFIs = par_init[4]),
weights = weight,
algorithm = "port",
lower = c(-10000,X0.0+1, -10000, -10000),
upper = c(10000, Xlast-1, 10000, 10000),
trace = F)
The complementary functions (file Function.R):
abcd.2 <- function(P){
X0 <- P[1]
Xs <- P[2]
DFIs <- P[3]
CFIs <- P[4]
a <- -X0*(2*CFIs*Xs-CFIs*X0-Xs^2*DFIs+Xs*DFIs*X0)/(Xs^2-2*X0*Xs+X0^2)
b <- (-Xs^2*DFIs+DFIs*X0^2+2*CFIs*Xs)/(Xs^2-2*X0*Xs+X0^2)
c <- -(CFIs-Xs*DFIs+X0*DFIs)/(Xs^2-2*X0*Xs+X0^2)
pp <- as.vector(c(a, b, c))
return(pp)
}
#--------------------------------------------------------------
# NLS function
#--------------------------------------------------------------
nls.func.2 <- function(X0, Xs, DFIs, CFIs){
pp <- c(X0, Xs, DFIs, CFIs)
#calculation of a, b and c using these new parameters
c <- abcd.2(pp)[3]
b <- abcd.2(pp)[2]
a <- abcd.2(pp)[1]
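# note: x is not an argument of nls.func.2; it is looked up in the calling environment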
ind1 <- as.numeric(x < Xs)
return (ind1*(a+b*x+c*x^2)+(1-ind1)*((a+b*(Xs)+c*(Xs)^2)+(b+2*c*(Xs))*(x-(Xs))))
}
#--------------------------------------------------------------
# Fit new parameters to a quadratic-linear function of CFI
#--------------------------------------------------------------
pred.func.2 <- function(pr,age){
#
X0 <- pr[1]
Xs <- pr[2]
DFIs <- pr[3]
CFIs <- pr[4]
#
x <- age
#calculation of a, b and c using these new parameters
c <- abcd.2(pr)[3]
b <- abcd.2(pr)[2]
a <- abcd.2(pr)[1]
#
ind1 <- as.numeric(x < Xs)
#
results <- list()
cfi <- ind1*(a+b*x+c*x^2)+(1-ind1)*((a+b*(Xs)+c*(Xs)^2)+(b+2*c*(Xs))*(x-(Xs))) #CFI
dfi <- ind1*(b+2*c*x) + (1 - ind1)*(b+2*c*(Xs)) #DFI
results[[1]] <- cfi
results[[2]] <- dfi
return (results)
}
#---------------------------------------------------------------------------------------------------------------
# Quadratic-linear function of CFI curve and its 1st derivative (DFI) with original parameters (only a, b and c)
#---------------------------------------------------------------------------------------------------------------
pred.abcd.2 <- function(pr,age){
#
a <- pr[1]
b <- pr[2]
c <- pr[3]
x <- age
#a, b and c are taken directly from pr here; nothing is recalculated
#
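# note: Xs is not an argument of pred.abcd.2; it must exist in the calling environment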
ind1 <- as.numeric(x < Xs)
#
results <- list()
cfi <- ind1*(a+b*x+c*x^2)+(1-ind1)*((a+b*(Xs)+c*(Xs)^2)+(b+2*c*(Xs))*(x-(Xs))) #CFI
dfi <- ind1*(b+2*c*x) + (1 - ind1)*(b+2*c*(Xs)) #DFI
results[[1]] <- cfi
results[[2]] <- dfi
return (results)
}
Update: I reviewed my logic from the previous step and found that it had messed up my data a bit. I have fixed that. The case where a set of data ran into an infinite loop no longer exists, but this error is still there: singular gradient matrix at initial parameter estimates.
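A minimal sketch of the tryCatch pattern the question asks about, wrapped around the nlsLM call that fails for ID 5470. The recovery action is an assumption: here a failure breaks out of the while loop so the for loop can move on to the next animal; adapt it to whatever behaviour you want.
nls.CFI <- tryCatch(
  nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
        Data.xy,
        control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
        start = list(X0 = par_init[1], Xs = par_init[2],
                     DFIs = par_init[3], CFIs = par_init[4]),
        weights = weight,
        algorithm = "port",
        lower = c(-10000, X0.0 + 1, -10000, -10000),
        upper = c(10000, Xlast - 1, 10000, 10000),
        trace = F),
  error = function(e) {
    # e.g. "singular gradient matrix at initial parameter estimates"
    message("nlsLM failed for ANIMAL_ID ", i, ": ", conditionMessage(e))
    NULL
  }
)
if (is.null(nls.CFI)) break  # leave the while loop; the for loop continues with the next pig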

Every time variable increases, create a new sample using a for loop

I need to do the following: I would like to increase the variable pi1 from -1 to 0 in 0.0001 steps, under the condition that all the other variables stay the same. As a second step I need to draw 1000 samples for each different pi1 value. In the end, I need to measure the bias of both regressions relative to the real value. After a thorough investigation, I really don't see why both loops are not working.
This is the sort of idea of how it could work:
index <- seq(1, 1000)
beta_OLS <- NULL
beta_IV <- NULL
for (i in seq(from = -1, to = 0, by = 0.001)) {
  for (k in index) {
    n <- 2000
    pi1 <- i
    b0 <- 0
    b1 <- 0
    b2 <- -1/1000
    b3 <- 1/5
    z <- runif(n, 0, 25)
    ov <- rnorm(n, 0, 1)
    d <- -1/2 + pi1 * z + 1/2 * ov + rnorm(n, 0, 1) > 0
    y <- b0 + b1 * d + b2 * z + b3 * ov + rnorm(n, 0, 1/10)
    # OLS regression
    model12 <- lm(y ~ d, data = data)
    beta_OLS[k] <- model12$coefficients[2]
    # IV regression
    model12_1 <- ivreg(y ~ d | z, data = data)
    beta_IV[k] <- model12_1$coefficients[2]
  }
}
real_value <- -1/1000
average_OLS <- mean(beta_OLS)
average_IV <- mean(beta_IV)
biased_OLS <- average_OLS - real_value
biased_IV <- average_IV - real_value
biased_OLS
biased_IV
I made a few alterations here and it ran; I'm not sure if the results are right.
Since both loops had the same count, I eliminated one of them.
I also assigned the d and y results of every run to a data frame called data (which you used as the source for your regressions).
index <- seq(1, 1000)
beta_OLS <- NULL
beta_IV <- NULL
i <- -1
for (k in index) {
  n <- 2000
  pi1 <- i
  b0 <- 0
  b1 <- 0
  b2 <- -1/1000
  b3 <- 1/5
  z <- runif(n, 0, 25)
  ov <- rnorm(n, 0, 1)
  d <- -1/2 + pi1 * z + 1/2 * ov + rnorm(n, 0, 1) > 0
  y <- b0 + b1 * d + b2 * z + b3 * ov + rnorm(n, 0, 1/10)
  data <- as.data.frame(cbind(y, d))
  # OLS regression
  model12 <- lm(y ~ d, data = data)
  beta_OLS[k] <- model12$coefficients[2]
  # IV regression
  model12_1 <- ivreg::ivreg(y ~ d | z, data = data)
  beta_IV[k] <- model12_1$coefficients[2]
  i <- i + 0.001  # step pi1 for the next replication (the original assigned to pi1, which the next pass overwrote)
}
real_value <- -1/1000
average_OLS <- mean(beta_OLS)
average_IV <- mean(beta_IV)
biased_OLS <- average_OLS - real_value
biased_IV <- average_IV - real_value
biased_OLS
biased_IV
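The reason the original nested version appeared not to work is that beta_OLS[k] and beta_IV[k] are indexed by k only, so every new pi1 value overwrites the previous results and only the last grid value survives. Below is a sketch that keeps both loops and stores one estimate per (pi1, replication) pair; pi1.grid, nrep and dat are names introduced here, and ivreg is assumed to come from the AER package. With 1001 grid values times 1000 replications this is slow, so shrink the grid or nrep while testing.
library(AER)  # provides ivreg()

pi1.grid <- seq(from = -1, to = 0, by = 0.001)
nrep <- 1000
beta_OLS <- matrix(NA_real_, nrow = length(pi1.grid), ncol = nrep)
beta_IV <- matrix(NA_real_, nrow = length(pi1.grid), ncol = nrep)
for (g in seq_along(pi1.grid)) {
  pi1 <- pi1.grid[g]
  for (k in seq_len(nrep)) {
    n <- 2000
    b0 <- 0; b1 <- 0; b2 <- -1/1000; b3 <- 1/5
    z <- runif(n, 0, 25)
    ov <- rnorm(n, 0, 1)
    d <- -1/2 + pi1 * z + 1/2 * ov + rnorm(n, 0, 1) > 0
    y <- b0 + b1 * d + b2 * z + b3 * ov + rnorm(n, 0, 1/10)
    dat <- data.frame(y, d, z)  # keep z in the data frame for ivreg
    beta_OLS[g, k] <- coef(lm(y ~ d, data = dat))[2]
    beta_IV[g, k] <- coef(ivreg(y ~ d | z, data = dat))[2]
  }
}
real_value <- -1/1000
bias_OLS <- rowMeans(beta_OLS) - real_value  # one bias estimate per pi1 value
bias_IV <- rowMeans(beta_IV) - real_value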

Why random effect estimators are not correct

I'm trying to simulate glmmLasso using binomial data, but the random effect estimate is not close to the 5 that I specified.
Is something wrong in my code?
If not, why does the random effect come out like that?
makedata <- function(I, J, p, sigmaB){
  N <- I*J
  # fixed effect generation
  beta0 <- runif(1, 0, 1)
  beta <- sort(runif(p, 0, 1))
  # x generation
  x <- matrix(runif(N*p, -1, 1), N, p)
  # random effect generation
  b0 <- rep(rnorm(I, 0, sigmaB), each = J)
  # group
  group <- as.factor(rep(1:I, each = J))
  # y generation
  k <- exp(-(beta0 + x %*% beta + b0))
  y <- rbinom(n = length(k), size = 1, prob = (1/(1+k)))
  # standardization
  sx <- scale(x, center = TRUE, scale = TRUE)
  simuldata <- data.frame(y = y, x = sx, group)
  res <- list(simuldata = simuldata)
  return(res)
}
# I : number of groups
I <- 20
# J : number of observation in group
J <- 10
# p : number of variables
p <- 20
# sigmaB : sd of random effect b0
sigmaB <- 5
library(glmmLasso)
set.seed(231233)
simdata <- makedata(I, J, p, sigmaB)
lam <- 10
xnam <- paste("x", 1:p, sep = ".")
fmla <- as.formula(paste("y ~ ", paste(xnam, collapse = "+")))
# makedata returns a list, so pass the data frame it contains
glmm <- glmmLasso(fmla, rnd = list(group = ~1), data = simdata$simuldata,
                  lambda = lam, control = list(scale = T, center = T))
summary(glmm)
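As a hypothetical cross-check (not part of the original code, and it may emit convergence warnings with p = 20 predictors and only 200 observations), fitting the same simulated data with an unpenalized mixed model from lme4 separates two effects: glmmLasso shrinks the fixed effects by design, and with only I = 20 groups the random-intercept SD is estimated from 20 draws, so sizable deviations from sigmaB = 5 are expected either way.
library(lme4)
dat <- simdata$simuldata
fmla.re <- as.formula(paste("y ~", paste(xnam, collapse = " + "), "+ (1 | group)"))
glmm.check <- glmer(fmla.re, data = dat, family = binomial)
VarCorr(glmm.check)  # estimated random-intercept SD, to compare with sigmaB = 5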

Extract inclusion probabilities and positive probabilities from BoomSpikeSlab model

The default plot function of BoomSpikeSlab models is a bar plot of each predictor's inclusion probability, colored by its probability of being positive:
library(BoomSpikeSlab)
set.seed(0)
simulate.lm.spike <- function(n = 100, p = 10, ngood = 3, niter = 1000, sigma = 1) {
  x <- matrix(rnorm(n * (p - 1)), nrow = n)
  beta <- c(rnorm(ngood), rep(0, p - ngood))
  y <- rnorm(n, beta[1] + x %*% beta[-1], sigma)
  draws <- lm.spike(y ~ x, niter = niter)
  return(invisible(draws))
}
model <- simulate.lm.spike(n = 1000, p = 50, sigma = .3)
plot(model, inclusion.threshold = .01)
How can I extract the data behind this plot, i.e. a data frame with each predictor's inclusion probability and probability of being positive?
Adapting the PlotMarginalInclusionProbabilities function:
GetMarginalInclusionProbabilities <- function(
    model,
    burn = 0,
    inclusion.threshold = 0,
    unit.scale = TRUE,
    number.of.variables = NULL) {
  beta <- model$beta
  if (burn > 0) {
    beta <- beta[-(1:burn), , drop = FALSE]
  }
  inclusion.prob <- colMeans(beta != 0)
  index <- order(inclusion.prob)
  beta <- beta[, index, drop = FALSE]
  inclusion.prob <- inclusion.prob[index]
  compute.positive.prob <- function(x) {
    ## Compute the probability that x is positive, given that it is
    ## nonzero. If all(x == 0) then zero is returned.
    x <- x[x != 0]
    if (length(x) == 0) {
      return(0)
    }
    return(mean(x > 0))
  }
  positive.prob <- apply(beta, 2, compute.positive.prob)
  res <- data.frame(predictor = names(inclusion.prob),
                    inclusion.prob = inclusion.prob,
                    positive.prob = positive.prob)
  return(res[order(-res$inclusion.prob), ])
}
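Note that only the burn argument is actually used above; inclusion.threshold, unit.scale and number.of.variables are carried over from the signature of PlotMarginalInclusionProbabilities and could either be dropped or used, for example, to filter the returned data frame to predictors whose inclusion probability exceeds the threshold.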
Example:
GetMarginalInclusionProbabilities(model)
# predictor inclusion.prob positive.prob
# (Intercept) (Intercept) 1.000 1
# x1 x1 1.000 0
# x2 x2 0.999 1
# x15 x15 0.014 1
# x43 x43 0.002 1

Error in df(X0): argument "df1" is missing, with no default - tracing R code

I have written two gradient descent functions; in the second one, only the alpha handling differs (the initial alpha is different and it is halved on each iteration). I receive a weird error and was unable to trace its cause.
Here's the code:
k <- 19000
rho.prime <- function(t, k) ifelse(abs(t) <= k, 2*t, 2*k*sign(t))
dMMSE <- function(b, k = 19000, y = farmland$farm, x = farmland$land){
  n <- length(y)
  a <- 0
  d <- 0
  for (i in 1:n) {
    a <- a + rho.prime(y[i] - b[1] - b[2]*x[i], k)
    d <- d + x[i]*rho.prime(y[i] - b[1] - b[2]*x[i], k)
  }
  a <- (-a/n)
  d <- (-d/n)
  return(c(a, d))
}
grd <- gr.descent(dMMSE, c(3500, 0.33), alpha = 0.0001, verbose = TRUE)
gr.descent2 <- function(dMMSE, x0, alpha = 0.1, eps = 0.001, max.it = 50, verbose = FALSE){
  X1 <- x0
  cond <- TRUE
  iteration <- 0
  if (verbose) cat("X0 =", X1, "\n")
  while (cond) {
    iteration <- iteration + 1
    X0 <- X1
    X1 <- X0 - alpha * df(X0)
    alpha <- alpha/2
    cond <- sum((X1 - X0)^2) > eps & iteration < max.it
    if (verbose) cat(paste(sep = "", "X", iteration, " ="), X1, "\n")
  }
  print("mona2")
  print(X1)
  return(X1)
}
grd2 <- gr.descent2(dMMSE, c(3500, 0.33), alpha = 0.1, verbose = TRUE)
#(beta0=grd2[1])
#(beta1=grd2[2])
So when I run the code I receive this error:
[1] "mona"
[1] 3496.409 -259466.640
X0 = 3500 0.33
Error in df(X0) : argument "df1" is missing, with no default
which is related to the gr.descent2 function. Any thoughts?
Type this:
?df # the F distribution density
and notice that the df1 and df2 arguments have no defaults, so they need to be supplied. That is exactly what happens in gr.descent2: the gradient function is passed in under the name dMMSE, but the body calls df(X0); since no local df exists, R resolves it to stats::df, the F density, which then complains about the missing df1.
integrate( function(x) df(x, 1, 100), 0, 3.84)
# 0.9471727 with absolute error < 1.4e-05
And notice the similarity of result:
> integrate( function(x) dchisq(x, 1), 0, 3.84)
0.9499565 with absolute error < 1.4e-05
Here's the corrected code: the gradient argument is named der_f and called as der_f(X0), so it no longer resolves to stats::df:
library(MASS)  # for rlm()
farmland <- read.csv("http://pages.stat.wisc.edu/~gvludwig/327-5/FarmLandArea.csv")
str(farmland)
plot(farm ~ land, data = farmland)
fit <- lm(farm ~ land, data = farmland)
abline(fit)  # least squares regression line
abline(rlm(farm ~ land, data = farmland), col = "red")
gr.descent <- function(der_f, x0, alpha = 0.0001, eps = 0.001, max.it = 50, verbose = FALSE){
  X1 <- x0
  cond <- TRUE
  iteration <- 0
  if (verbose) cat("X0 =", X1, "\n")
  while (cond) {
    iteration <- iteration + 1
    X0 <- X1
    X1 <- X0 - alpha * der_f(X0)
    cond <- sum((X1 - X0)^2) > eps & iteration < max.it
    if (verbose) cat(paste(sep = "", "X", iteration, " ="), X1, "\n")
  }
  print("mona")
  print(X1)
  return(X1)
}
rho <- function(t, k) ifelse(abs(t) <= k, t^2, 2*k*abs(t) - k^2)
k <- 19000
rho.prime <- function(t, k) ifelse(abs(t) <= k, 2*t, 2*k*sign(t))
dMMSE <- function(b, k = 19000, y = farmland$farm, x = farmland$land){
  n <- length(y)
  a <- 0
  d <- 0
  for (i in 1:n) {
    a <- a + rho.prime(y[i] - b[1] - b[2]*x[i], k)
    d <- d + x[i]*rho.prime(y[i] - b[1] - b[2]*x[i], k)
  }
  a <- (-a/n)
  d <- (-d/n)
  return(c(a, d))
}
grd <- gr.descent(dMMSE, c(3500, 0.33), alpha = 0.0001, verbose = TRUE)
gr.descent2 <- function(der_f, x0, alpha = 0.1, eps = 0.001, max.it = 50, verbose = FALSE){
  X1 <- x0
  cond <- TRUE
  iteration <- 0
  if (verbose) cat("X0 =", X1, "\n")
  while (cond) {
    iteration <- iteration + 1
    X0 <- X1
    X1 <- X0 - alpha * der_f(X0)
    alpha <- alpha/2
    cond <- sum((X1 - X0)^2) > eps & iteration < max.it
    if (verbose) cat(paste(sep = "", "X", iteration, " ="), X1, "\n")
  }
  print("mona2")
  print(X1)
  return(X1)
}
#plot(farm~land,data=farmland)
#curve(rho(k=19000),xlim=c(-10,10),,col="blue", add="TRUE")
grd2 <- gr.descent2(dMMSE, c(3500, 0.33), alpha = 0.1, verbose = TRUE)
#(beta0=grd2[1])
#(beta1=grd2[2])
