nls leave-one-out CV plot in R

I'm trying to do leave-one-out cross-validation of a non-linear regression and plot the optimal fit. I feel like my LOOCV and plot functions are totally wrong. Could anybody clarify what I'm doing wrong?
data(Boston, package='MASS')
y <- Boston$nox
x <- Boston$dis
n <- length(x)
nla <- n
las <- seq(0, .85, length=nla)
cvs <- rep(0, nla)
for(j in 1:nla) {
prs <- rep(0,n)
for(i in 1:n) {
yi <- y[-i]
xi <- x[-i]
d <- nls(y~ A + B * exp(C * x), start=list(A=0.5, B=0.5, C=-0.5))
prs[i] <- predict(d, newdata=data.frame(xi=x[i]))
}
cvs[j] <- mean( (y - prs)^2 )
}
cvs[j]
plot(y~x, pch=19, col='gray', cex=1.5,xlab='dis', ylab='nox')
d <- nls(y~ A + B * exp(C * x), start=list(A=0.5, B=0.5, C=-0.5))
lines(predict(d)[order(x)]~sort(x), lwd=4, col='black')

You seem to have been close, but in your loop you were still calling the full data set x and y. As far as I can tell, you only need a single loop to fit the model to each leave-one-out scenario, so I can't see the need for the variables las or prs. For reference, the plot shows the leave-one-out mean squared error (LOO MSE) and the mean squared error of the residuals (MSE) for the nls model fit to the full data set.
Script:
require(MASS)
data(Boston, package='MASS')
y <- Boston$nox
x <- Boston$dis
n <- length(x)
cvs <- rep(0, n)
for(j in seq(n)){
ys <- y[-j]
xs <- x[-j]
d <- nls(ys ~ A + B * exp(C * xs), start=list(A=0.5, B=0.5, C=-0.5))
cvs[j] <- (y[j] - predict(d, data.frame(xs=x[j])))^2
print(paste0(j, " of ", n, " finished (", round(j/n*100), "%)"))
}
plot(y~x, pch=19, col='gray', cex=1.5, xlab='dis', ylab='nox')
d <- nls(y~ A + B * exp(C * x), start=list(A=0.5, B=0.5, C=-0.5))
lines(predict(d)[order(x)]~sort(x), lwd=4, col='black')
usr <- par("usr")
text(usr[1] + 0.9*(usr[2]-usr[1]), usr[3] + 0.9*(usr[4]-usr[3]), paste("LOO MSE", "=", round(mean(cvs), 5)), pos=2)
text(usr[1] + 0.9*(usr[2]-usr[1]), usr[3] + 0.8*(usr[4]-usr[3]), paste("MSE", "=", round(mean(resid(d)^2), 5)), pos=2)

Related

Performing residual bootstrap using kernel regression in R

Kernel regression is a non-parametric technique for estimating the conditional expectation of a random variable. It uses local averaging of the response values, Y, to find a possibly non-linear relationship between X and Y.
I have used the bootstrap for kernel density estimation and now want to use it for kernel regression as well. I have been told to use residual bootstrapping for kernel regression and have read a couple of papers on this, but I am unsure how to carry it out. The programming has been done in R using the FKSUM package. I have made an attempt at standard resampling on kernel regression:
library(FKSUM)
set.seed(1)
n <- 5000
sample.size <- 500
B.replications <- 200
x <- rbeta(n, 2, 2) * 10
y <- 3 * sin(2 * x) + 10 * (x > 5) * (x - 5)
y <- y + rnorm(n) + (rgamma(n, 2, 2) - 1) * (abs(x - 5) + 3)
#taking x.y to be the population
x.y <- data.frame(x, y)
xs <- seq(min(x), max(x), length = 1000)
ftrue <- 3 * sin(2 * xs) + 10 * (xs > 5) * (xs - 5)
#Sample from the population
seqx<-seq(1,5000,by=1)
sample.ind <- sample(seqx, size = sample.size, replace = FALSE)
sample.reg<-x.y[sample.ind,]
x_s <- sample.reg$x
y_s <- sample.reg$y
fhat_loc_lin.pop <- fk_regression(x, y)
fhat_loc_lin.sample <- fk_regression(x = x_s, y = y_s)
plot(x, y, col = rgb(.7, .7, .7, .3), pch = 16, xlab = 'x',
ylab = 'y', main = 'Local linear estimator with amise bandwidth')
lines(xs, ftrue, col = 2, lwd = 3)
lines(fhat_loc_lin.pop, lty = 2, lwd = 2)
#Bootstrap
n.B.sample = sample.size # sample bootstrap size
boot.reg.mat.X <- matrix(0,ncol=B.replications, nrow=n.B.sample)
boot.reg.mat.Y <- matrix(0,ncol=B.replications, nrow=n.B.sample)
fhat_loc_lin.boot <- matrix(0,ncol = B.replications, nrow=100)
Temp.reg.y <- matrix(0,ncol = B.replications,nrow = 1000)
for(i in 1:B.replications){
sequence.x.boot <- seq(from=1,to=n.B.sample,by=1)
sample.ind.boot <- sample(sequence.x.boot, size = sample.size, replace = TRUE)
boot.reg.mat <- sample.reg[sample.ind.boot,]
boot.reg.mat.X <- boot.reg.mat$x
boot.reg.mat.Y <- boot.reg.mat$y
fhat_loc_lin.boot <- fk_regression(x = boot.reg.mat.X ,
y = boot.reg.mat.Y,
h = fhat_loc_lin.sample$h)
lines(y=fhat_loc_lin.boot$y,x= fhat_loc_lin.sample$x, col =c(i) )
Temp.reg.y[,i] <- fhat_loc_lin.boot$y
}
quan.reg.l <- vector()
quan.reg.u <- vector()
for(i in 1:length(xs)){
quan.reg.l[i] <- quantile(x = Temp.reg.y[i,],probs = 0.025)
quan.reg.u[i] <- quantile(x = Temp.reg.y[i,],probs = 0.975)
}
# Lower Bound
Temp.reg.2 <- quan.reg.l
lines(y=Temp.reg.2,x=fhat_loc_lin.boot$x ,col="red",lwd=4,lty=1)
# Upper Bound
Temp.reg.3 <- quan.reg.u
lines(y=Temp.reg.3,x=fhat_loc_lin.boot$x ,col="navy",lwd=4,lty=1)
Asking the question here now since I haven't received any response on CV. Any help would be greatly appreciated!
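For reference (this was not part of the original post), here is a minimal sketch of the residual-bootstrap idea. It assumes fk_regression exposes an evaluation grid in $x/$y and the fitted bandwidth in $h, as the code above suggests; using approx to get fitted values at the observed points is my own assumption rather than anything from the FKSUM documentation.
# Sketch only: residual bootstrap for kernel regression (see assumptions above)
fit <- fk_regression(x_s, y_s)                      # fit on the original sample
fitted_s <- approx(fit$x, fit$y, xout = x_s)$y      # fitted values at the observed x's
res_s <- y_s - fitted_s
res_s <- res_s - mean(res_s)                        # centre the residuals
boot.curves <- matrix(NA, nrow = length(fit$x), ncol = B.replications)
for (b in 1:B.replications) {
  y.star <- fitted_s + sample(res_s, length(y_s), replace = TRUE)  # resample residuals, keep x fixed
  fit.star <- fk_regression(x_s, y.star, h = fit$h)                # refit with the same bandwidth
  boot.curves[, b] <- fit.star$y                                   # assumes the same evaluation grid
}
band <- apply(boot.curves, 1, quantile, probs = c(0.025, 0.975))   # pointwise 95% band
The difference from the attempt above is that only the residuals are resampled while the design points stay fixed, and each refit reuses the bandwidth estimated from the original sample.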

See the plot close to the intersection

## Define the moment generating function of Weibull distribution
scale <- 1/2
shape <- 1
lambda <- 2
beta <- 0.1
## I have specified nmax = 160 since I can't carry the sum out to infinity
mgfw <- function(x){
nmax <- 160
scale <- scale
shape <- shape
suma <- 0
for(n in 0:nmax){
suma <- suma + ((x^n) * ((scale)^n)) * gamma(1 + (n/shape)) / factorial(n)
}
return(suma)
}
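For reference, the truncated series above corresponds to the moment generating function of a Weibull distribution with scale parameter $\theta$ (the code's scale) and shape parameter $k$ (the code's shape); the code keeps only the first nmax + 1 terms:

$$M_X(t) = \mathbb{E}\!\left[e^{tX}\right] = \sum_{n=0}^{\infty} \frac{t^n \theta^n}{n!}\, \Gamma\!\left(1 + \frac{n}{k}\right)$$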
curve(mgfw, from=0.1, 0.25, ylim=c(1, 1.2))
mu <- (scale) * gamma(1 + (1 / shape))
fun2 <- function(x) 1 + x * (1 + beta) * mu
x <- seq(0, 10, length.out=100)
y <- fun2(x)
curve(fun2, from=0, 10, add=TRUE)
grid()
Solving the previous equations, I got the following results:
library(rootSolve)
r <- uniroot.all(function(x) mgfw(x) - fun2(x), c(0.1, 0.185))
r
abline(v=r)
I got a plot like this:
The intersection of both curves is marked by the vertical line, but I would like a plot where the intersection is clearly visible. How can I resize the plot, or view that region on a different scale?
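One simple option (my suggestion, not from the original post) is to re-draw both curves on a narrow window around the root returned by uniroot.all; the half-width eps below is an arbitrary choice.
# Sketch: zoom in around the (first) root; eps is an arbitrary window half-width
r1 <- r[1]
eps <- 0.01
ys <- c(mgfw(c(r1 - eps, r1 + eps)), fun2(c(r1 - eps, r1 + eps)))
curve(mgfw, from = r1 - eps, to = r1 + eps, ylim = range(ys), xlab = "x", ylab = "value")
curve(fun2, from = r1 - eps, to = r1 + eps, add = TRUE, col = "blue")
abline(v = r1, lty = 2)
grid()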

Fitting data points to an ellipse with its center at the origin using R

I have a question about fitting ellipses to data with the ellipse center at the origin. I have explored two methods that fit ellipses but generate an arbitrary center unless I manipulate the data with some imaginary mirror points.
Method#01
This portion of the script comes directly from this useful post. I'm copying the code here for convenience.
fit.ellipse <- function (x, y = NULL) {
# from:
# http://r.789695.n4.nabble.com/Fitting-a-half-ellipse-curve-tp2719037p2720560.html
#
# Least squares fitting of an ellipse to point data
# using the algorithm described in:
# Radim Halir & Jan Flusser. 1998.
# Numerically stable direct least squares fitting of ellipses.
# Proceedings of the 6th International Conference in Central Europe
# on Computer Graphics and Visualization. WSCG '98, p. 125-132
#
# Adapted from the original Matlab code by Michael Bedward (2010)
# michael.bedward#gmail.com
#
# Subsequently improved by John Minter (2012)
#
# Arguments:
# x, y - x and y coordinates of the data points.
# If a single arg is provided it is assumed to be a
# two column matrix.
#
# Returns a list with the following elements:
#
# coef - coefficients of the ellipse as described by the general
# quadratic: ax^2 + bxy + cy^2 + dx + ey + f = 0
#
# center - center x and y
#
# major - major semi-axis length
#
# minor - minor semi-axis length
#
EPS <- 1.0e-8
dat <- xy.coords(x, y)
D1 <- cbind(dat$x * dat$x, dat$x * dat$y, dat$y * dat$y)
D2 <- cbind(dat$x, dat$y, 1)
S1 <- t(D1) %*% D1
S2 <- t(D1) %*% D2
S3 <- t(D2) %*% D2
T <- -solve(S3) %*% t(S2)
M <- S1 + S2 %*% T
M <- rbind(M[3,] / 2, -M[2,], M[1,] / 2)
evec <- eigen(M)$vec
cond <- 4 * evec[1,] * evec[3,] - evec[2,]^2
a1 <- evec[, which(cond > 0)]
f <- c(a1, T %*% a1)
names(f) <- letters[1:6]
# calculate the center and lengths of the semi-axes
#
# see http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2288654/
# J. R. Minter
# for the center, linear algebra to the rescue
# center is the solution to the pair of equations
# 2ax + by + d = 0
# bx + 2cy + e = 0
# or
# | 2a b | |x| |-d|
# | b 2c | * |y| = |-e|
# or
# A x = b
# or
# x = Ainv b
# or
# x = solve(A) %*% b
A <- matrix(c(2*f[1], f[2], f[2], 2*f[3]), nrow=2, ncol=2, byrow=T )
b <- matrix(c(-f[4], -f[5]), nrow=2, ncol=1, byrow=T)
soln <- solve(A) %*% b
b2 <- f[2]^2 / 4
center <- c(soln[1], soln[2])
names(center) <- c("x", "y")
num <- 2 * (f[1] * f[5]^2 / 4 + f[3] * f[4]^2 / 4 + f[6] * b2 - f[2]*f[4]*f[5]/4 - f[1]*f[3]*f[6])
den1 <- (b2 - f[1]*f[3])
den2 <- sqrt((f[1] - f[3])^2 + 4*b2)
den3 <- f[1] + f[3]
semi.axes <- sqrt(c( num / (den1 * (den2 - den3)), num / (den1 * (-den2 - den3)) ))
# calculate the angle of rotation
term <- (f[1] - f[3]) / f[2]
angle <- atan(1 / term) / 2
list(coef=f, center = center, major = max(semi.axes), minor = min(semi.axes), angle = unname(angle))
}
Let's take an example distribution of polar points for illustration purposes:
X<-structure(list(x_polar = c(0, 229.777200000011, 246.746099999989,
-10.8621999999741, -60.8808999999892, 75.8904999999795, -83.938199999975,
-62.9770000000135, 49.1650999999838, 52.3093000000226, 49.6891000000178,
-66.4248999999836, 34.3671999999788, 242.386400000018, 343.60619999998
), y_polar = c(0, 214.868299999973, 161.063599999994, -68.8972000000067,
-77.0230000000447, 93.2863000000361, -16.2356000000145, 27.7828000000445,
-17.8077000000048, 2.10540000000037, 25.6866000000155, -84.6034999999683,
-31.1800000000512, 192.010800000047, 222.003700000001)), .Names = c("x_polar",
"y_polar"), row.names = c(NA, -15L), class = "data.frame")
efit <- fit.ellipse(X)
e <- get.ellipse(efit)
#plot
par(bg=NA)
plot(X, pch=3, col='gray', lwd=2, axes=F, xlab="", ylab="", type='n',
ylim=c(min(X$y_polar)-150, max(X$y_polar)), xlim=c(min(X$x_polar)-150, max(X$x_polar))) #blank plot
points(X$x_polar, X$y_polar, pch=3, col='gray', lwd=2, axes=F, xlab="", ylab="") #observations
lines(e, col="red", lwd=3, lty=2) #plotting the ellipse
points(0,0,col=2, lwd=2, cex=2) #center/origin
To bring the center of the ellipse to the origin, we could modify the data as follows (surely not the best way of doing it):
#generate mirror coordinates
X$x_polar_mirror<- -X$x_polar
X$y_polar_mirror<- -X$y_polar
mydata<-as.matrix(data.frame(c(X$x_polar, X$x_polar_mirror), c(X$y_polar, X$y_polar_mirror)))
#fit the data
efit <- fit.ellipse(mydata)
e <- get.ellipse(efit)
par(bg=NA)
plot(mydata, pch=3, col='gray', lwd=2, axes=F, xlab="", ylab="", type='n',
ylim=c(min(X$y_polar)-150, max(X$y_polar)), xlim=c(min(X$x_polar)-150, max(X$x_polar)))
points(X$x_polar, X$y_polar, pch=3, col='gray', lwd=2, axes=F, xlab="", ylab="")
lines(e, col="red", lwd=3, lty=2)
points(0,0,col=2, lwd=2, cex=2) #center
Well ... it sort of does the job, but no one would be happy with all those imaginary points entering the calculation.
Method#02
This is another indirect way of fitting the data but again the ellipse center is not at the origin. Any workaround?
require(car)
dataEllipse(X$x_polar, X$y_polar, levels=c(0.15, 0.7),
xlim=c(-150, 400), ylim=c(-200,300))
My questions: (a) is there a robust alternative way of fitting these points with the ellipse center at the origin (0,0)? (b) is there a measure of the goodness of ellipse fit? Thank you in advance.
I'm not really happy with the approach I've conceived; there should be a closed-form solution, but still:
# Ellipse equation centered at (0, 0) with semi-axes pars[1] and pars[2], rotated by pars[3].
# t and pars[3] in radians
ellipsePoints <- function(t, pars) {
data.frame(x = cos(pars[3]) * pars[1] * cos(t) - sin(pars[3]) * pars[2] * sin(t),
y = sin(pars[3]) * pars[1] * cos(t) + cos(pars[3]) * pars[2] * sin(t))
}
# Way to fit an ellipse through minimising distance to data points.
# If weighted then points which are most remote from center will have bigger impact.
ellipseBrute <- function(x, y, pars, weighted = FALSE) {
d <- sqrt(x**2 + y**2)
t <- asin(y/d)
w <- (d/sum(d))**weighted
t[x == 0 & y == 0] <- 0
ep <- ellipsePoints(t, pars)
sum(w*(sqrt(ep$x**2 + ep$y**2) - d)**2)
}
# Fit through optim.
opt_res <- optim(c(diff(range(X$x_polar)),
diff(range(X$y_polar)),
2*pi)/2,
ellipseBrute,
x = X$x_polar, y = X$y_polar,
weighted = TRUE
)
# Check the resulting ellipse with a plot
df <- ellipsePoints(seq(0, 2*pi, length.out = 1e3), opt_res$par)
plot(y ~ x, df, col = 'blue', t = 'l',
xlim = range(c(X$x_polar, df$x)),
ylim = range(c(X$y_polar, df$y)))
points(0, 0, pch = 3, col = 'blue')
points(y_polar ~ x_polar, X)
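On question (b), a rough diagnostic (my suggestion rather than a standard measure) is the minimised objective that optim returns in opt_res$value; rescaling it gives an RMS radial deviation, though note that with weighted = TRUE the residuals entering it are already weighted.
# Rough goodness-of-fit: RMS of the radial deviations minimised above
rms.radial <- sqrt(opt_res$value / length(X$x_polar))
rms.radial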

Adding confidence intervals to plot from simulation data in R

I've created a probit simulation based on a likelihood function; all of it can be replicated with the code below.
This is the likelihood function:
probit.ll <- function(par,ytilde,x) {
a <- par[1]
b <- par[2]
return( -sum( pnorm(ytilde*(a + b*x),log=TRUE) ))
}
This is the function to do the estimates:
my.probit <- function(y,x) {
# use OLS to get start values
par <- lm(y~x)$coefficients
ytilde <- 2*y-1
# Run optim
res <- optim(par,probit.ll,hessian=TRUE,ytilde=ytilde,x=x)
# Return point estimates and SE based on the inverse of Hessian
names(res$par) <- c('a','b')
se=sqrt(diag(solve(res$hessian)))
names(se) <- c('a','b')
return(list(par=res$par,se=se,cov=solve(res$hessian)))
}
And this is the function to generate the simulated model:
probit.data <- function(N=100,a=1,b=1) {
x <- rnorm(N)
y.star <- a + b*x + rnorm(N)
y <- (y.star > 0)
return( as.data.frame(cbind(y,x,y.star)) )
}
This simulates a sample of size n = 100:
probit.data100 <- function(N=100,a=2,b=1) {
x <- rnorm(N)
y.star <- a + b*x + rnorm(N)
y <- (y.star > 0)
return( as.data.frame(cbind(y,x,y.star)) )
}
# Standard error of the predicted probability at x (delta method)
se.probit.phat100 <- function(x, par, V) {
z <- par[1] + par[2] * x
# Derivative of q w.r.t. alpha and beta
J <- c( dnorm(z), dnorm(z)*par[2] )
return( sqrt(t(J) %*% V %*% J) )
}
dat100 <- probit.data100()
res100 <- my.probit(dat100$y,dat100$x)
res100
The code below runs a non-parametric bootstrap (note the sample function being used) to get the parameter estimates from which the confidence intervals are computed:
N <- dim(probit.data(N=100, a=1, b=1))[1]
npb.par <- matrix(NA,100,2)
colnames(npb.par) <- c("alpha","beta")
npb.eystar <- matrix(NA,100,N)
for (t in 1:100) {
thisdta <- probit.data(N=100, a=1, b=1)[sample(1:N,N,replace=TRUE),]
npb.par[t,] <- my.probit(thisdta$y,thisdta$x)$par
}
The function below just cleans up the bootstrap output; the resulting confidence intervals are what I would like to plot:
processres <- function(simres) {
z <- t(apply(simres,2,function(x) { c(mean(x),median(x),sd(x),quantile(x,c(0.05,0.95))) } ))
rownames(z) <- colnames(simres)
colnames(z) <- c("mean","median","sd","5%","95%")
z
}
processres(npb.par)
I would like to plot a graph like this (the one below), but add confidence intervals based on the processres function above. How can these confidence intervals be added to the plot?
x <- seq(-5,5,length=100)
plot(x, pnorm(1 - 0.5*x), ty='l', lwd=2, bty='n', xlab='x', ylab="Pr(y=1)")
rug(dat100$x)
I'm also open to a different plot code and/or package. I just want a graph based on this simulation with added confidence intervals.
Thanks!
Here's a way to add a shaded CI based on simulation results:
UPDATE: this now plots the expected curve (i.e. using mean alpha & beta values), and correctly passes these means to rnorm.
x <- seq(-5,5,length=100)
plot(x, pnorm(1 - 0.5*x), ty='n', lwd=2, bty='n', xlab='x', ylab="Pr(y=1)",
xaxs = 'i', ylim=c(0, 1))
params <- processres(npb.par)
sims <- 100000
sim.mat <- matrix(NA, ncol=length(x), nrow=sims)
for (i in 1:sims) {
alpha <- rnorm(1, params[1, 1], params[1, 3])
beta <- rnorm(1, params[2, 1], params[2, 3])
sim.mat[i, ] <- pnorm(alpha - beta*x)
}
CI <- apply(sim.mat, 2, function(x) quantile(x, c(0.05, 0.95)))
polygon(c(x, rev(x)), c(CI[1, ], rev(CI[2, ])), col='gray', border=NA)
lines(x, pnorm(params[1, 1] - params[2, 1]*x), lwd=2)
rug(dat100$x)
box()

Visual Comparison of Regression & PCA

I'm trying to perfect a method for comparing regression and PCA, inspired by the blog Cerebral Mastication, which has also been discussed from a different angle on SO. Before I forget, many thanks to JD Long and Josh Ulrich for much of the core of this. I'm going to use this in a course next semester. Sorry this is long!
UPDATE: I found a different approach which almost works (please fix it if you can!). I posted it at the bottom. It's a much smarter and shorter approach than I was able to come up with!
I basically followed the previous schemes up to a point: generate random data, figure out the line of best fit, draw the residuals. This is shown in the Second Code Chunk below. But I also dug around and wrote some functions to draw lines normal to a line through a given point (the data points in this case). I think these work fine, and they are shown in the First Code Chunk along with proof that they work.
Now, the Second Code Chunk shows the whole thing in action, using the same flow as #JDLong, and I'm adding an image of the resulting plot. The data are in black, red is the regression line with residuals in pink, blue is the first PC, and the light blue segments should be the normals, but obviously they are not. The functions in the First Code Chunk that draw these normals seem fine, but something is not right with the demonstration: I think I must be misunderstanding something or passing the wrong values. My normals come out horizontal, which seems like a useful clue (but so far, not to me). Can anyone see what's wrong here?
Thanks, this has been vexing me for a while...
First Code Chunk (Functions to Draw Normals and Proof They Work):
##### The functions below are based very loosely on the citation at the end
pointOnLineNearPoint <- function(Px, Py, slope, intercept) {
# Px, Py is the point to test, can be a vector.
# slope, intercept is the line to check distance.
Ax <- Px-10*diff(range(Px))
Bx <- Px+10*diff(range(Px))
Ay <- Ax * slope + intercept
By <- Bx * slope + intercept
pointOnLine(Px, Py, Ax, Ay, Bx, By)
}
pointOnLine <- function(Px, Py, Ax, Ay, Bx, By) {
# This approach based upon comingstorm's answer on
# stackoverflow.com/questions/3120357/get-closest-point-to-a-line
# Vectorized by Bryan
PB <- data.frame(x = Px - Bx, y = Py - By)
AB <- data.frame(x = Ax - Bx, y = Ay - By)
PB <- as.matrix(PB)
AB <- as.matrix(AB)
k_raw <- k <- c()
for (n in 1:nrow(PB)) {
k_raw[n] <- (PB[n,] %*% AB[n,])/(AB[n,] %*% AB[n,])
if (k_raw[n] < 0) { k[n] <- 0
} else { if (k_raw[n] > 1) k[n] <- 1
else k[n] <- k_raw[n] }
}
x = (k * Ax + (1 - k)* Bx)
y = (k * Ay + (1 - k)* By)
ans <- data.frame(x, y)
ans
}
# The following proves that pointOnLineNearPoint
# and pointOnLine work properly and accept vectors
par(mar = c(4, 4, 4, 4)) # otherwise the plot is slightly distorted
# and right angles don't appear as right angles
m <- runif(1, -5, 5)
b <- runif(1, -20, 20)
plot(-20:20, -20:20, type = "n", xlab = "x values", ylab = "y values")
abline(b, m )
Px <- rnorm(10, 0, 4)
Py <- rnorm(10, 0, 4)
res <- pointOnLineNearPoint(Px, Py, m, b)
points(Px, Py, col = "red")
segments(Px, Py, res[,1], res[,2], col = "blue")
##========================================================
##
## Credits:
## Theory by Paul Bourke http://local.wasp.uwa.edu.au/~pbourke/geometry/pointline/
## Based in part on C code by Damian Coventry Tuesday, 16 July 2002
## Based on VBA code by Brandon Crosby 9-6-05 (2 dimensions)
## With grateful thanks for answering our needs!
## This is an R (http://www.r-project.org) implementation by Gregoire Thomas 7/11/08
##
##========================================================
Second Code Chunk (Plots the Demonstration):
set.seed(55)
np <- 10 # number of data points
x <- 1:np
e <- rnorm(np, 0, 60)
y <- 12 + 5 * x + e
par(mar = c(4, 4, 4, 4)) # otherwise the plot is slightly distorted
plot(x, y, main = "Regression minimizes the y-residuals & PCA the normals")
yx.lm <- lm(y ~ x)
lines(x, predict(yx.lm), col = "red", lwd = 2)
segments(x, y, x, fitted(yx.lm), col = "pink")
# pca "by hand"
xyNorm <- cbind(x = x - mean(x), y = y - mean(y)) # mean centers
xyCov <- cov(xyNorm)
eigenValues <- eigen(xyCov)$values
eigenVectors <- eigen(xyCov)$vectors
# Add the first PC by denormalizing back to original coords:
new.y <- (eigenVectors[2,1]/eigenVectors[1,1] * xyNorm[x]) + mean(y)
lines(x, new.y, col = "blue", lwd = 2)
# Now add the normals
yx2.lm <- lm(new.y ~ x) # zero residuals: already a line
res <- pointOnLineNearPoint(x, y, yx2.lm$coef[2], yx2.lm$coef[1])
points(res[,1], res[,2], col = "blue", pch = 20) # segments should end here
segments(x, y, res[,1], res[,2], col = "lightblue1") # the normals
############ UPDATE
Over at Vincent Zoonekynd's page I found almost exactly what I wanted. But it doesn't quite work (it obviously used to). Here is a code excerpt from that site which plots normals to the first PC, reflected through a vertical axis:
set.seed(1)
x <- rnorm(20)
y <- x + rnorm(20)
plot(y~x, asp = 1)
r <- lm(y~x)
abline(r, col='red')
r <- princomp(cbind(x,y))
b <- r$loadings[2,1] / r$loadings[1,1]
a <- r$center[2] - b * r$center[1]
abline(a, b, col = "blue")
title(main='Appears to use the reflection of PC1')
u <- r$loadings
# Projection onto the first axis
p <- matrix( c(1,0,0,0), nrow=2 )
X <- rbind(x,y)
X <- r$center + solve(u, p %*% u %*% (X - r$center))
segments( x, y, X[1,], X[2,] , col = "lightblue1")
And here is the result:
Alright, I'll have to answer my own question! After further reading and comparison of methods that people have put on the internet, I have solved the problem. I'm not sure I can clearly state what I "fixed" because I went through quite a few iterations. Anyway, here is the plot and the code (MWE). The helper functions are at the end for clarity.
# Comparison of Linear Regression & PCA
# Generate sample data
set.seed(39) # gives a decent-looking example
np <- 10 # number of data points
x <- -np:np
e <- rnorm(length(x), 0, 10)
y <- rnorm(1, 0, 2) * x + 3*rnorm(1, 0, 2) + e
# Plot the main data & residuals
plot(x, y, main = "Regression minimizes the y-residuals & PCA the normals", asp = 1)
yx.lm <- lm(y ~ x)
lines(x, predict(yx.lm), col = "red", lwd = 2)
segments(x, y, x, fitted(yx.lm), col = "pink")
# Now the PCA using built-in functions
# rotation = loadings = eigenvectors
r <- prcomp(cbind(x,y), retx = TRUE)
b <- r$rotation[2,1] / r$rotation[1,1] # gets slope of loading/eigenvector 1
a <- r$center[2] - b * r$center[1]
abline(a, b, col = "blue") # Plot 1st PC
# Plot normals to 1st PC
X <- pointOnLineNearPoint(x, y, b, a)
segments( x, y, X[,1], X[,2], col = "lightblue1")
###### Needed Functions
pointOnLineNearPoint <- function(Px, Py, slope, intercept) {
# Px, Py is the point to test, can be a vector.
# slope, intercept is the line to check distance.
Ax <- Px-10*diff(range(Px))
Bx <- Px+10*diff(range(Px))
Ay <- Ax * slope + intercept
By <- Bx * slope + intercept
pointOnLine(Px, Py, Ax, Ay, Bx, By)
}
pointOnLine <- function(Px, Py, Ax, Ay, Bx, By) {
# This approach based upon comingstorm's answer on
# stackoverflow.com/questions/3120357/get-closest-point-to-a-line
# Vectorized by Bryan
PB <- data.frame(x = Px - Bx, y = Py - By)
AB <- data.frame(x = Ax - Bx, y = Ay - By)
PB <- as.matrix(PB)
AB <- as.matrix(AB)
k_raw <- k <- c()
for (n in 1:nrow(PB)) {
k_raw[n] <- (PB[n,] %*% AB[n,])/(AB[n,] %*% AB[n,])
if (k_raw[n] < 0) { k[n] <- 0
} else { if (k_raw[n] > 1) k[n] <- 1
else k[n] <- k_raw[n] }
}
x = (k * Ax + (1 - k)* Bx)
y = (k * Ay + (1 - k)* By)
ans <- data.frame(x, y)
ans
}
Try changing this line of your code:
res <- pointOnLineNearPoint(x, y, yx2.lm$coef[2], yx2.lm$coef[1])
to
res <- pointOnLineNearPoint(x, new.y, yx2.lm$coef[2], yx2.lm$coef[1])
That way you're passing the correct y values.
In Vincent Zoonekynd's code, change the line u <- r$loadings to u <- solve(r$loadings). In the second instance of solve(), the predicted component scores along the first principal axis (i.e., the matrix of predicted scores with the second predicted component scores set to zero) need to be multiplied by the inverse of the loadings/eigenvectors. Multiplying data by the loadings gives predicted scores; dividing predicted scores by the loadings gives the data back. Hope that helps.
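For clarity, here is how that change slots into the excerpt above (only the first line differs; the rest is unchanged):
u <- solve(r$loadings)   # was: u <- r$loadings
p <- matrix(c(1, 0, 0, 0), nrow = 2)
X <- rbind(x, y)
X <- r$center + solve(u, p %*% u %*% (X - r$center))
segments(x, y, X[1, ], X[2, ], col = "lightblue1")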
