Force GAM model fit to be monotonic and go through a fixed point (x0, y0) with R mgcv

I am trying to fit a GAM model to data under two constraints simultaneously: (1) the fit is monotonic (increasing); (2) the fit goes through a fixed point, say (x0, y0).
So far, I managed to have these two constraints work separately:
For (1), following the mgcv::pcls() documentation examples: use mgcv::mono.con() to obtain linear constraints sufficient for monotonicity, then estimate the model coefficients with mgcv::pcls() under those constraints.
For (2), based on this post: set the value of the spline at the knot located at x0 to 0 and add an offset term to the model formula.
However, I struggle to combine the two constraints. I guess the way to go is mgcv::pcls(), but I could work out neither (a) the same trick of fixing the spline's value at the knot x0 to 0 plus an offset, nor (b) setting up equality constraints (which I think could encode my constraint (2)).
I also note that fixing the spline's value at the knot x0 to 0 for constraint (2) yields a weirdly wiggly fit compared to the unconstrained GAM, as shown below.
Attempt so far: fit a smooth function to data under two constraints separately
Simulate some data
library(mgcv)
set.seed(1)
x <- sort(runif(100) * 4 - 1)
f <- exp(4*x)/(1+exp(4*x))
y <- f + rnorm(100) * 0.1
dat <- data.frame(x=x, y=y)
GAM unconstrained (for comparison)
k <- 13
fit0 <- gam(y ~ s(x, k = k, bs = "cr"), data = dat)
# predict from unconstrained GAM fit
newdata <- data.frame(x = seq(-1, 3, length.out = 1000))
newdata$y_pred_fit0 <- predict(fit0, newdata = newdata)
GAM constrained: (1) the fit is monotonic (increasing)
k <- 13
# Show regular spline fit (and save fitted object)
f.ug <- gam(y~s(x,k=k,bs="cr"))
# explicitly construct smooth term's design matrix
sm <- smoothCon(s(x,k=k,bs="cr"),dat,knots=NULL)[[1]]
# find linear constraints sufficient for monotonicity of a cubic regression spline
# it assumes "cr" is the basis and its knots are provided as input
F <- mono.con(sm$xp)
G <- list(
  X = sm$X,
  C = matrix(0, 0, 0),  # [0 x 0] matrix (no equality constraints)
  sp = f.ug$sp,         # smoothing parameter estimates (taken from the unconstrained model)
  p = sm$xp,            # feasible initial parameter estimates
  y = y,
  w = dat$y * 0 + 1     # weights for the data
)
G$Ain <- F$A # matrix for the inequality constraints
G$bin <- F$b # vector for the inequality constraints
G$S <- sm$S # list of penalty matrices; the first parameter penalized by S[[i]] is given by off[i]+1
G$off <- 0 # offset locating the elements of G$S within the overall penalty coefficient matrix (zero offset: start at the first coefficient)
p <- pcls(G); # fit spline (using smoothing parameter estimates from unconstrained fit)
# predict
newdata$y_pred_fit2 <- Predict.matrix(sm, data.frame(x = newdata$x)) %*% p
# plot
plot(y ~ x, data = dat)
lines(y_pred_fit0 ~ x, data = newdata, col = 2, lwd = 2)
lines(y_pred_fit2 ~ x, data = newdata, col = 4, lwd = 2)
Blue line: constrained; red line: unconstrained
GAM constrained: (2) the fit goes through (x0, y0) = (-1, -0.1)
k <- 13
## Create a spline basis and penalty
## Make sure there is a knot at the constraint point (here: -1)
knots <- data.frame(x = seq(-1,3,length=k))
# explicit construction of a smooth term in a GAM
sm <- smoothCon(s(x,k=k,bs="cr"), dat, knots=knots)[[1]]
## The 1st coefficient is the value of the spline at the knot at -1; force it to 0 by dropping it
knot_which <- which(knots$x == -1)
X <- sm$X[, -knot_which] ## spline basis
S <- sm$S[[1]][-knot_which, -knot_which] ## spline penalty
off <- dat$y * 0 + (-0.1) ## offset term to force curve through (x0, y0)
## fit spline constrained through (x0, y0)
gam_1 <- gam(y ~ X - 1 + offset(off), paraPen = list(X = list(S)))
# predict (add offset of -0.1)
newdata_tmp <- Predict.matrix(sm, data.frame(x = newdata$x))
newdata_tmp <- newdata_tmp[, -knot_which]
newdata$y_pred_fit1 <- (newdata_tmp %*% coef(gam_1))[, 1] + (-0.1)
# plot
plot(y ~ x, data = dat)
lines(y_pred_fit0 ~ x, data = newdata, col = 2, lwd = 2)
lines(y_pred_fit1 ~ x, data = newdata, col = 3, lwd = 2)
# reference lines crossing at the point the fit should pass through
abline(v=-1, col = 3); abline(h=-0.1, col = 3)
Green line: constrained; red line: unconstrained

I think you could augment the data vectors x and y with (x0, y0) and then put a (really) high weight on the first observation (i.e. add a weight vector to your G list).
As an alternative to the simple weighting strategy, we can write out the quadratic programming problem directly, starting from the results of the preliminary smoothing. This is illustrated in the second R code example below (in this case I used P-spline smoothers; see Eilers and Marx, 1996).
Hope this helps a bit (a similar problem is discussed here).
R code example 1 (weighting strategy)
set.seed(123)
N = 100
x <- sort(runif(N) * 4 - 1)
f <- exp(4*x)/(1+exp(4*x))
y <- f + rnorm(N) * 0.1
x = c(-1, x)
y = c(-0.1, y)
dat = data.frame(x = x, y= y)
k <- 13
fit0 <- gam(y ~ s(x, k = k, bs = "cr"), data = dat)
# predict from unconstrained GAM fit
newdata <- data.frame(x = seq(-1, 3, length.out = 1000))
newdata$y_pred_fit0 <- predict(fit0, newdata = newdata)
k <- 13
# Show regular spline fit (and save fitted object)
f.ug <- gam(y~s(x,k=k,bs="cr"))
# explicitly construct smooth term's design matrix
sm <- smoothCon(s(x,k=k,bs="cr"),dat,knots=NULL)[[1]]
# find linear constraints sufficient for monotonicity of a cubic regression spline
# it assumes "cr" is the basis and its knots are provided as input
F <- mono.con(sm$xp)
G <- list(
  X = sm$X,
  C = matrix(0, 0, 0),    # [0 x 0] matrix (no equality constraints)
  sp = f.ug$sp,           # smoothing parameter estimates (taken from the unconstrained model)
  p = sm$xp,              # feasible initial parameter estimates
  y = y,
  w = c(1e8, rep(1, N))   # weights: a huge weight on the prepended point (x0, y0)
)
G$Ain <- F$A # matrix for the inequality constraints
G$bin <- F$b # vector for the inequality constraints
G$S <- sm$S # list of penalty matrices; the first parameter penalized by S[[i]] is given by off[i]+1
G$off <- 0 # offset locating the elements of G$S within the overall penalty coefficient matrix (zero offset: start at the first coefficient)
p <- pcls(G); # fit spline (using smoothing parameter estimates from unconstrained fit)
# predict
newdata$y_pred_fit2 <- Predict.matrix(sm, data.frame(x = newdata$x)) %*% p
# plot
plot(y ~ x, data = dat)
lines(y_pred_fit0 ~ x, data = newdata, col = 2, lwd = 2)
lines(y_pred_fit2 ~ x, data = newdata, col = 4, lwd = 2)
abline(v = -1)
abline(h = -0.1)
R code example 2 (quadratic programming)
rm(list = ls())
library(mgcv)
library(pracma)
set.seed(123)
N = 100
x = sort(runif(N) * 4 - 1)
f = exp(4*x)/(1+exp(4*x))
y = f + rnorm(N) * 0.1
x0 = -1
y0 = -0.1
dat = data.frame(x = x, y= y)
k = 50
# Show regular spline fit (and save fitted object)
f.ug = gam(y~s(x,k=k,bs="ps"))
# explicitly construct smooth term's design matrix
sm = smoothCon(s(x,k=k,bs="ps"), dat,knots=NULL)[[1]]
# Build quadprog to estimate the coefficients
scf = sapply(f.ug$smooth, '[[', 'S.scale')
lam = f.ug$sp / scf
Xp = rbind(sm$X, sqrt(lam) * f.ug$smooth[[1]]$D)
yp = c(dat$y, rep(0, k - 2))
X0 = Predict.matrix(sm, data.frame(x = x0))
sm$deriv = 1
X1 = Predict.matrix(sm, data.frame(x = dat$x))
coef_mono = pracma::lsqlincon(Xp, yp, Aeq = X0, beq = y0, A = -X1, b = rep(0, N))
# fitted values
fit = sm$X %*% coef_mono
sm$deriv = 0
xf = seq(-1, 3, len = 1000)
Xf = Predict.matrix(sm, data.frame(x = xf))
fine_fit = Xf %*% coef_mono
# plot
par(mfrow = c(2, 1), mar = c(3,3,3,3))
plot(dat$x, dat$y, pch = 1, main= 'Data and fit')
lines(dat$x, f.ug$fitted, lwd = 2, col = 2)
lines(dat$x, fit, col = 4, lty = 1, lwd = 2)
lines(xf, fine_fit, col = 3, lwd = 2, lty = 2)
abline(h = -0.1)
abline(v = -1)
plot(dat$x, X1 %*% coef_mono, type = 'l', main = 'Derivative of the fit', lwd = 2)
abline(h = 0.0)
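Coming back to the OP's option (b): if I read ?pcls correctly, the G$C slot can carry the fixed-point constraint directly. pcls() handles equality constraints of the form Cp = c, where c is not supplied explicitly but is implied by the initial parameter vector G$p, so it should suffice to put the basis row evaluated at x0 into C and start from a feasible p. Below is a minimal, untested sketch along those lines, reusing dat, k and the unconstrained fit f.ug from the question; p0 relies on the property of the "cr" basis that the coefficients equal the spline's values at the knots.
x0 <- -1; y0 <- -0.1
knots <- data.frame(x = seq(-1, 3, length = k)) # make sure there is a knot at x0
sm <- smoothCon(s(x, k = k, bs = "cr"), dat, knots = knots)[[1]]
F <- mono.con(sm$xp) # monotonicity constraints, as before
p0 <- sm$xp - sm$xp[1] + y0 # monotone starting values with f(x0) = y0
G <- list(
  X = sm$X,
  C = Predict.matrix(sm, data.frame(x = x0)), # one equality constraint: f(x0) = y0
  sp = f.ug$sp,
  p = p0,
  y = dat$y,
  w = rep(1, nrow(dat)),
  Ain = F$A, bin = F$b,
  S = sm$S, off = 0
)
p_eq <- pcls(G)
# predict as before: Predict.matrix(sm, data.frame(x = newdata$x)) %*% p_eq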

The following package seems to implement what you are looking for:
The proposed shape constrained smoothing has been incorporated into generalized
additive models with a mixture of unconstrained and shape restricted smooth terms
(mono-GAM). [...]
The proposed modelling approach has been implemented in an R package monogam.
The model setup is the same as in mgcv(gam) with the addition of shape constrained
smooths. In order to be consistent with the unconstrained GAM, the package provides
key functions similar to those associated with mgcv(gam).
Additive models with shape constraints
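For what it is worth, the monogam functionality described in that thesis is, as far as I know, what is now distributed on CRAN as the scam package (Pya and Wood, "Shape constrained additive models"). A short sketch of a monotone increasing fit to the question's data; note this handles constraint (1) only, and the fixed-point constraint would still need separate treatment:
library(scam)
# "mpi" requests a monotone increasing P-spline basis
fit_scam <- scam(y ~ s(x, k = 13, bs = "mpi"), data = dat)
newdata$y_pred_scam <- predict(fit_scam, newdata = newdata)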

Related

How to fit Gaussian distribution with one-sided data?

x <- c(-3,-2.5,-2,-1.5,-1,-0.5)
y <- c(2,2.5,2.6,2.9,3.2,3.3)
The challenge is that all of the data come from the left slope; how can one fit a two-sided Gaussian distribution?
There is incomplete information in the question, so several approaches can be taken. Note that the data are insufficient; e.g., trying to fit this with nls directly does not work.
Here is one way to tackle it:
f <- function(par, x, y) sum((y - par[3] * dnorm(x, par[1], par[2]))^2)
a <- optim(c(0, 1, 1), f, x = x, y = y)$par
plot(x, y, xlim = c(-3,3.5), ylim = c(2, 3.5))
curve(dnorm(x, a[1], a[2])*a[3], add = TRUE, col = 2)
There is no way to fit a Gaussian distribution with these densities. If correct y-values had been provided this would be one way of solving the problem:
# Define function to be optimized
f <- function(pars, x, y) {
  mu <- pars[1]
  sigma <- pars[2]
  y_hat <- dnorm(x, mu, sigma)
  se <- (y - y_hat)^2
  sum(se)
}
# Define the data
x <- c(-3,-2.5,-2,-1.5,-1,-0.5)
y <- c(2,2.5,2.6,2.9,3.2,3.3)
# Find the best paramters
opt <- optim(c(-.5, .1), f, method = 'SANN', x = x, y = y) # method must be named, else 'SANN' is taken as the gradient argument
plot(
  seq(-5, 5, length.out = 200),
  dnorm(seq(-5, 5, length.out = 200), opt$par[1], opt$par[2]),
  type = 'l', col = 'red'
)
points(c(-3,-2.5,-2,-1.5,-1,-0.5), c(2,2.5,2.6,2.9,3.2,3.3))
Use nls to get a least squares fit of y to .lin.a * dnorm(x, b, c) where .lin.a, b and c are parameters to be estimated.
fm <- nls(y ~ cbind(a = dnorm(x, b, c)),
start = list(b = mean(x), c = sd(x)), algorithm = "plinear")
fm
giving:
Nonlinear regression model
model: y ~ cbind(a = dnorm(x, b, c))
data: parent.frame()
b c .lin.a
0.2629 3.2513 27.7287
residual sum-of-squares: 0.02822
Number of iterations to convergence: 7
Achieved convergence tolerance: 2.582e-07
The dnorm model (black curve) seems to fit the points although even a straight line (blue line) involving only two parameters (intercept and slope) instead of 3 isn't bad.
plot(y ~ x)
lines(fitted(fm) ~ x)
fm.lin <- lm(y ~ x)
abline(fm.lin, col = "blue")

Is it possible to specify lower bound in response variable during smooth with gam?

I am trying to fit a smoothed surface of z against x and y, using the formula z ~ s(x, y) with the gam function in the mgcv package. My goal is to predict the response z for new values of x and y.
In my real situation, z should be a positive number; negative z would be meaningless. However, the predicted values of z are sometimes negative. It seems that for some regions there are not enough points in the training data to estimate z accurately.
My question is: is there a way to specify a lower bound on z during smoothing with gam, so that I won't later get negative z from predict()?
Below is a minimal example that reproduces this issue.
library(mgcv)
x <- seq(0.1, 1, by = 0.01)
y <- seq(0.1, 1, by = 0.01)
dtt <- expand.grid(x = x, y = y)
set.seed(123)
dtt$xp <- dtt$x + rnorm(nrow(dtt)) / 100
dtt$yp <- dtt$y + rnorm(nrow(dtt)) / 100
dtt$z <- 1 / (dtt$xp^2 + dtt$yp^2)
m <- sample.int(nrow(dtt), 3000)
dtt.train <- dtt[m, ]
dtt.test <- dtt[!(1:nrow(dtt) %in% m), ]
fit <- gam(z ~ s(x, y), data = dtt.train)
p <- predict(fit, newdata = dtt.test)
plot(dtt.test$z, p, xlab = 'Real', ylab = 'Predicted', pch = 19, col = 1 + (p < 0))
abline(h = 0, v = 0)
As you can see, for the red points the real values are positive but the predicted values are negative.
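One standard workaround (a suggestion on my part, not from this thread): leave the smooth unconstrained but model z through a log link, so that predictions on the response scale are positive by construction. A minimal sketch using the objects above:
fit_log <- gam(z ~ s(x, y), data = dtt.train, family = gaussian(link = "log"))
p_log <- predict(fit_log, newdata = dtt.test, type = "response") # exp of the linear predictor, always > 0
For a strictly positive, right-skewed response, family = Gamma(link = "log") is another common choice.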

I would like to visualize the third order interaction fitted with thin-plate regression splines

I am a beginner in R, so this may be a simple question.
I am trying to fit 4-dimensional points using thin-plate regression splines. One variable is the target and the other three are explanatory variables.
I made a model with a third order interaction and fitted the data to it.
library(mgcv)
dat <- read.csv('../data//data.csv')
model <- gam(Y ~ s(x1, x2, x3), data=dat)
By fixing x3 at a given value, I want to visualize a three-dimensional graph of the spline surface or an estimated contour plot. How can I do this?
It will be very helpful if you can answer.
Thanks.
This is the sample data.
n = 100
x1 <- runif(n, min = 0, max = 100)
x2 <- runif(n, min = 0, max = 100)
x3 <- runif(n, min = 0, max = 100)
Y <- numeric(n)
for (i in 1:n) {
  Y[i] <- x1[i]^0.5 * x2[i]^2 * x3[i] / 10000
}
dat = data.frame(Y=Y, x1=x1, x2=x2, x3=x3)
I fit a thin-plate regression spline using this dat.
model <- gam(Y ~ s(x1, x2, x3, k= 50), data=dat)
Then I would like to obtain the fitted curve of the three-dimensional thin-plate regression spline, or a contour plot estimated by the regression spline when, for example, x3 = 25.
To make a contour plot, you can use contour(x, y, z, ...). z is your data matrix (in your case Y[x1, x2, ]), and x and y are index vectors from 0 to 1 with lengths nrow(Y[x1, x2, ]) and ncol(Y[x1, x2, ]).
You should be able to use it along these lines:
contour(x = seq(0, 1, length.out = length(x1)), y = seq(0, 1, length.out = length(x2)), z = Y[x1, x2, ])
I found a solution based on d0d0's answer.
n <- 100
const <- 25
x <- y <- seq(0, n, 1)
f <- function(x, y) {
  dtmp <- data.frame(x1 = x, x2 = y, x3 = const)
  predict.gam(model, dtmp)
}
z <- outer(x, y, f)
contour(x, y, z)
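mgcv can also produce this plot directly: vis.gam() holds the remaining covariates at fixed values via its cond argument and draws a contour of the fitted surface. A sketch using the model above:
# contour of the fitted smooth over (x1, x2) with x3 fixed at 25
vis.gam(model, view = c("x1", "x2"), cond = list(x3 = 25), plot.type = "contour")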

Quadratic spline

Is there a way to fit a quadratic spline (instead of a cubic one) to some data?
I have this data and I can't seem to find the appropriate function in R to do it.
Expanding just a bit on the comments above, you can use a B-spline basis (implemented in function splines::bs()), setting degree=2 rather than the default degree=3:
library(splines)
## Some example data
set.seed(1)
x <- 1:10
y <- rnorm(10)
## Fit a couple of quadratic splines with different degrees of freedom
f1 <- lm(y ~ bs(x, degree = 2)) # Defaults to 2 - 1 = 1 degree of freedom
f9 <- lm(y ~ bs(x, degree = 2, df=9))
## Plot the splines
x0 <- seq(1, 10, by = 0.1)
plot(x, y, pch = 16)
lines(x0, predict(f1, data.frame(x = x0)), col = "blue")
lines(x0, predict(f9, data.frame(x = x0)), col = "red")
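Since much of this page uses mgcv, it is worth noting that a quadratic penalized spline should also be available there via the "ps" basis: if I recall the m argument correctly, m[1] = 2 is the cubic default, so m[1] = 1 gives a quadratic basis, while m[2] = 2 keeps a second-order difference penalty. A sketch on the same data:
library(mgcv)
fq <- gam(y ~ s(x, k = 5, bs = "ps", m = c(1, 2)), data = data.frame(x, y))
lines(x0, predict(fq, data.frame(x = x0)), col = "darkgreen")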

R: predict.glm equivalent for MCMCpack::MCMClogit

I am running a Bayesian logit with MCMCpack::MCMClogit. The syntax is easy and follows lm() or glm(), but I can't find any equivalent of the predict.glm function. Is there a way to predict the probabilities of the outcomes in MCMClogit for each observation in the data frame? predict() is especially useful for validating a model on new data, which is what I ultimately have to do.
df = read.csv("http://dl.dropbox.com/u/1791181/MCMC.csv")#Read in data
model.glm = glm(SECONDARY.LEVEL ~ AGE + SEX, data=df, family=binomial(link=logit))
glm.predict = predict(model.glm, type="response")
For MCMClogit():
model.mcmc = MCMClogit(SECONDARY.LEVEL ~ AGE + SEX, data=df, mcmc=1000)
You could use the posterior distribution of model parameters produced by MCMC to get a distribution of predictions, using the logistic function.
For instance, if your model formula is y ~ x1 + x2 + x3, and your MCMC output is stored in the variable posterior.mcmc, then you could use
function(x1, x2, x3) 1 / (1 + exp(-posterior.mcmc %*% rbind(1, x1, x2, x3)))
to give the distribution analogous to predict.glm(., 'response')
More detailed example for the case of a single input variable:
library(extraDistr)
library(MCMCpack)
# Take x uniformly distributed between -100 and 100
x <- runif(2000, min=-100, max=100)
# Generate a response which is logistic with some noise
beta <- 1/8
eps <- rnorm(length(x), 0, 1)
p <- function(x, eps) 1 / (1 + exp(-beta*x + eps))
p.x <- p(x, eps)
y <- sapply(p.x, function(p) rbern(1, p))
df1 <- data.frame(x, y)
# Fit by logistic regression
glm.logistic <- glm(y ~ x, df1, family=binomial)
# MCMC gives a distribution of values for the model parameters
posterior.mcmc <- MCMClogit(y ~ x, df1, verbose=2000)
densplot(posterior.mcmc)
# Thus, we have a distribution of model predictions for each x
predict.p.mcmc <- function(x) 1 / (1 + exp(-posterior.mcmc %*% rbind(1,x)))
interval.p.mcmc <- function(x, low, high)
  apply(predict.p.mcmc(x), 2, function(x) quantile(x, c(low, high)))
predict.y.mcmc <- function(x) posterior.mcmc %*% rbind(1, x)
interval.y.mcmc <- function(x, low, high)
  apply(predict.y.mcmc(x), 2, function(x) quantile(x, c(low, high)))
## Plot the data and fits ##
plot(x, p.x, ylab = 'Pr(y=1)', pch = 20, cex = 0.5, main = 'Probability vs x')
# x-values for prediction
x_test <- seq(-100, 100, 0.01)
# Blue line is the logistic function we used to generate the data, with noise removed
p_of_x_test <- p(x_test, 0)
lines(x_test, p_of_x_test, col = 'blue')
# Green line is the prediction from logistic regression
lines(x_test, predict(glm.logistic, data.frame(x = x_test), 'response'), col = 'green')
# Red lines indicates the range of model predictions from MCMC
# (for each x, 95% of the distribution of model predictions lies between these bounds)
interval.p.mcmc_95 <- interval.p.mcmc(x_test, 0.025, 0.975)
lines(x_test, interval.p.mcmc_95[1,], col = 'red')
lines(x_test, interval.p.mcmc_95[2,], col = 'red')
# Similarly for the log-odds
plot(x, log(p.x/(1 - p.x)), ylab = 'log[Pr(y=1) / (1 - Pr(y=1))]',
pch = 20, cex = 0.5, main = 'Log-Odds vs x')
lines(x_test, log(p_of_x_test/(1 - p_of_x_test)), col = 'blue')
lines(x_test, predict(glm.logistic, data.frame(x = x_test)), col = 'green')
interval.y.mcmc_95 <- interval.y.mcmc(x_test, 0.025, 0.975)
lines(x_test, interval.y.mcmc_95[1,], col = 'red')
lines(x_test, interval.y.mcmc_95[2,], col = 'red')
The description of the function says:
This function generates a sample from the posterior distribution of a logistic regression model using a random walk Metropolis algorithm.
I therefore think that your model.mcmc already contains the points that MCMClogit() has simulated.
You can use str to see what it contains, and the summary and plot functions on it, as in the examples in the package manual: http://cran.r-project.org/web/packages/MCMCpack/MCMCpack.pdf
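To turn those stored draws into something analogous to predict(model.glm, type = "response"), you can push the design matrix through each posterior draw and average. A hedged sketch (the object names are mine):
X <- model.matrix(SECONDARY.LEVEL ~ AGE + SEX, data = df)
draws <- as.matrix(model.mcmc) # one row per posterior draw, columns matching model.matrix()
p_draws <- plogis(X %*% t(draws)) # n.obs x n.draws matrix of probabilities
p_mean <- rowMeans(p_draws) # posterior-mean probability per observation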
