I tried binary logistic regression with BFGS using maxLik. I included the features as per the syntax attached below, but I get output like this:
Maximum Likelihood estimation
BFGS maximization, 0 iterations
Return code 100: Initial value out of range.
https://docs.google.com/spreadsheets/d/1fVLeJznB9k29FQ_BdvdCF8ztkOwbdFpx/edit?usp=sharing&ouid=109040212946671424093&rtpof=true&sd=true (this is my data)
library(maxLik)
library(optimx)
library(readxl)
data=read_excel("Book2.xlsx")
data$JKLaki = ifelse(data$JK==1,1,0)
data$Daerah_Samarinda<- ifelse(data$Daerah==1,1,0)
data$Prodi2 = ifelse(data$Prodi==2,1,0)
data$Prodi3 = ifelse(data$Prodi==3,1,0)
data$Prodi4 = ifelse(data$Prodi==4,1,0)
str(data)
attach(data)
ll<- function(param){
mu <- param[1]
beta <- param[-1]
y<- as.vector(data$Y)
x<- cbind(1, data$JKLaki, data$IPK, data$Daerah_Samarinda, data$Prodi2, data$Prodi3, data$Prodi4)
xb<- x%*%beta
pi<- exp(xb)
val <- -sum(y * log(pi) + (1 - y) * log(1 - pi),log=TRUE)
return(val)
}
gl<- function(param){
mu <- param[1]
beta <- param[-1]
y <- as.vector(data$Y)
x <- cbind(0, data$JKLaki,data$IPK,data$Daerah_Samarinda,data$Prodi2,data$Prodi3,data$Prodi4)
sigma <- x*beta
pi<- exp(sigma)/(1+exp(sigma))
v= y-pi
vx=as.matrix(x)%*%as.vector(v)
gg= colSums(vx)
return(-gg)}
mle<-maxLik(logLik=ll, grad=gl,hess=NULL,
start=c(mu=1, beta1=0, beta2=0, beta3=0, beta4=0, beta5=0, beta6=0,beta7=0), method="BFGS")
summary(mle)
Can I get some help? I have tried to get a solution, please.
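A likely cause, judging only from the code above (this note and the sketch below are an editorial addition, not part of the original post): pi <- exp(xb) is not a probability, so log(1 - pi) is NaN whenever exp(xb) >= 1, and maxLik then reports the initial value as out of range; the function also returns the negative sum, while maxLik expects the log-likelihood itself to be maximized. A minimal sketch of a numerically safer log-likelihood using plogis, with the intercept kept as the first column of x:
# Sketch only (editorial addition): stable logistic log-likelihood for maxLik,
# which maximizes, so the log-likelihood is returned with a positive sign.
ll_safe <- function(param) {
y <- data$Y
x <- cbind(1, data$JKLaki, data$IPK, data$Daerah_Samarinda,
data$Prodi2, data$Prodi3, data$Prodi4)
xb <- drop(x %*% param)
# log P(Y=1) = plogis(xb, log.p = TRUE); log P(Y=0) = plogis(-xb, log.p = TRUE)
sum(y * plogis(xb, log.p = TRUE) + (1 - y) * plogis(-xb, log.p = TRUE))
}
mle <- maxLik(logLik = ll_safe,
start = setNames(rep(0, 7), paste0("beta", 0:6)),
method = "BFGS")
summary(mle)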
I have been able to optimize the log-likelihood with the following code:
library(DEoptim)
library(readxl)
data <- read_excel("Book2.xlsx")
data$JKLaki <- ifelse(data$JK == 1, 1, 0)
data$Daerah_Samarinda <- ifelse(data$Daerah == 1, 1, 0)
data$Prodi2 <- ifelse(data$Prodi == 2, 1, 0)
data$Prodi3 <- ifelse(data$Prodi == 3, 1, 0)
data$Prodi4 <- ifelse(data$Prodi == 4, 1, 0)
ll <- function(param, data)
{
mu <- param[1]
beta <- param[-1]
y <- as.vector(data$Y)
x <- cbind(1, data$JKLaki, data$IPK, data$Daerah_Samarinda, data$Prodi2, data$Prodi3, data$Prodi4)
xb <- x %*% beta
pi <- exp(mu + xb)
val <- -sum(y * log(pi) + (1 - y) * log(1 - pi))
if(is.nan(val) == TRUE)
{
return(10 ^ 30)
}else
{
return(val)
}
}
lower <- rep(-500, 8)
upper <- rep(500, 8)
obj_DEoptim_Iter1 <- DEoptim(fn = ll, lower = lower, upper = upper,
control = list(itermax = 5000), data = data)
lower <- obj_DEoptim_Iter1$optim$bestmem - 0.25 * abs(obj_DEoptim_Iter1$optim$bestmem)
upper <- obj_DEoptim_Iter1$optim$bestmem + 0.25 * abs(obj_DEoptim_Iter1$optim$bestmem)
obj_DEoptim_Iter2 <- DEoptim(fn = ll, lower = lower, upper = upper,
control = list(itermax = 5000), data = data)
obj_Optim <- optim(par = obj_DEoptim_Iter2$optim$bestmem, fn = ll, data = data)
obj_Optim
$par
par1 par2 par3 par4 par5 par6 par7
-350.91045436 347.79576145 0.05337466 0.69032735 -0.01089112 0.47465162 0.38284804
par8
0.42125664
$value
[1] 95.08457
$counts
function gradient
501 NA
$convergence
[1] 1
$message
NULL
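As a sanity check (an editorial addition, not part of the original post), the same model can be fitted with glm and compared against the DEoptim/optim estimates above. Note also that the objective ll uses pi <- exp(mu + xb) with a column of ones already in x, so the intercept effectively enters twice, which would explain the large offsetting values of par1 and par2.
# Sketch only (editorial addition): reference logistic fit with glm() for comparison.
fit_glm <- glm(Y ~ JKLaki + IPK + Daerah_Samarinda + Prodi2 + Prodi3 + Prodi4,
family = binomial, data = data)
summary(fit_glm)
logLik(fit_glm)  # compare with the objective value reported by optim above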
I have written the following code.
library(quantreg)
# return the g function:
G = function(m, N, gamma) {
Tm = m * N
k = 1:Tm
Gvalue = sqrt(m) * (1 + k/m) * (k/(m + k))^gamma
return(Gvalue)
}
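# sqroot(): matrix square root of A via eigendecomposition (returns the transpose of V %*% diag(sqrt(val)) %*% V^-1)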
sqroot <- function(A) {
e = eigen(A)
v = e$vectors
val = e$values
sq = v %*% diag(sqrt(val)) %*% solve(v)
return(t(sq))
}
fa = function(m, N, a) {
Tm = m * N
k = 1:Tm
t = (m + k)/m
f_value = (t - 1) * t * (a^2 + log(t/(t - 1)))
return(sqrt(f_value))
}
m = 50
N = 2
n= 50*3
x1 = matrix(runif(n, 0, 1), ncol = 1)
x = cbind(1, x1)
beta = c(1, 1)
xb = x %*% beta
pr = 1/(1+exp(-xb))
y = rbinom(n,1,pr)
# calculate statistic:
stat = function(y, x, m, N, a) {
y_train = y[1:m]
x_train = x[(1:m),]
y_test = y[-(1:m)]
x_test = x[-(1:m),]
fit = glm(y ~ 0 + x, family="binomial")
coef = coef(fit)
log_predict = predict(fit, type="response")
sigma = sqrt(1/(m-1)* sum((y_train - log_predict)^2))
Jvalue = t(x_train) %*% x_train/m * sigma^2
Jsroot = sqroot(Jvalue)
fvalue = fa(m, N, a)
score1 = apply((x_test * as.vector((y_test - x_test %*% coef))), 2, cumsum)
statvalue1 = t(solve(Jsroot) %*% t(score1))/fvalue/sqrt(m)
statmax1 = pmax(abs(statvalue1[, 1]), abs(statvalue1[, 2]))
result = list(stat = statmax1)
return(result)
}
m =50
N = 2
a = 2.795
value = stat(y, x, m, N, a)
value
I want to perform bootstrap to obtain B = 999 number of statistics. I use the following r code. But it produces an error saying "Error in statistic(data, original, ...) :
argument "m" is missing, with no default"
library(boot)
data1 = data.frame(y = y, x = x1, m = m , N = N, a = a)
head(data1)
boot_value = boot(data1, statistic = stat, R = 999)
Can anyone give me a hint? Also, am I able to get the bootstrap results in a matrix format? Since the stat function gives 100 values.
There are different kinds of bootstrapping. If you want to draw 999 samples from your data with replacement, each of the same size as your data, you can just use replicate; no extra packages are needed.
We put the data to be resampled into a data frame. It looks to me like m, N and a remain constant, so we just provide them as arguments.
data2 <- data.frame(y=y, x=x)
The stat function needs to be adapted to unpack y and the x matrix. At the bottom we remove the list() call to get just a vector back; unname() gives us plain numbers.
stat2 <- function(data, m, N, a) {
y_train <- data[1:m, 1]
x_train <- as.matrix(data[1:m, 2:3])
y_test <- data[-(1:m), 1]
x_test <- as.matrix(data[-(1:m), 2:3])
y <- data[, "y"]
x <- as.matrix(data[, 2:3])
fit <- glm(y ~ 0 + x, family="binomial")
coef <- coef(fit)
log_predict <- predict(fit, type="response")
sigma <- sqrt(1/(m-1) * sum((y_train - log_predict)^2))
Jvalue <- t(x_train) %*% x_train/m * sigma^2
Jsroot <- sqroot(Jvalue)
fvalue <- fa(m, N, a)
score1 <- apply((x_test * as.vector((y_test - x_test %*% coef))), 2, cumsum)
statvalue1 <- t(solve(Jsroot) %*% t(score1))/fvalue/sqrt(m)
statmax1 <- pmax(abs(statvalue1[, 1]), abs(statvalue1[, 2]))
result <- unname(statmax1)
return(result)
}
replicate is a cousin of sapply, designed for repeated evaluation. In the call we simply sample the rows with replacement 999 times and already get a matrix back. As with sapply, we need to transpose the result.
res <- t(replicate(999, stat2(data2[sample(1:nrow(data2), nrow(data2), replace=TRUE), ], m, N, a)))
Result
As result we get 999 bootstrap replications in the rows with 100 attributes in the columns.
str(res)
# num [1:999, 1:100] 0.00205 0.38486 0.10146 0.12726 0.47056 ...
The code also runs quite fast.
user system elapsed
3.46 0.01 3.49
Note that there are different kinds of bootstrapping: sometimes only part of the sample is resampled, weights are used, clustering is applied, and so on. Since you attempted to use boot, though, the method shown here should correspond to the default.
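If you do want to stay with the boot package (this wrapper is an editorial addition, not part of the original answer), the statistic function must accept the resampled data and an index vector as its first two arguments; further arguments such as m, N and a are then passed through boot():
# Sketch only (editorial addition): boot-compatible wrapper around stat2().
library(boot)
stat_boot <- function(data, indices, m, N, a) {
stat2(data[indices, ], m, N, a)
}
boot_value <- boot(data2, statistic = stat_boot, R = 999, m = m, N = N, a = a)
str(boot_value$t)  # boot stores the replicates as a 999 x 100 matrix in $t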
I implemented this simple NN, but even after letting it run through all the iterations it fails to converge and the MSE remains very high.
I tried changing the number of iterations and the learning rate, but it doesn't help.
rm(list=ls())
data <- read.csv("C:/Users/Mikele/Documents/Uni/IA AI & Machine Learning/R/11_23_2018/wine.csv",sep = ',',header = FALSE)
x <- data[,1:11]
y <- as.matrix(data[,12])
y_matrix <- matrix(rep(0,length(y)),nrow = length(y), ncol = 6)
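# one-hot encode the labels into y_matrix: y[k] - 2 maps them to columns 1..6 (i.e., labels are assumed to range from 3 to 8)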
k <-1
for (w in 1:length(y))
{
temp <- y[k] - 2
y_matrix[k,temp] <-1
k <- k + 1
}
hl <- c(40, 30, 20)
iter <- 1000
lr <- 0.1
## add in intercept
x_1 <- as.matrix(cbind(rep(1, nrow(x)),x))
## set error array
error <- rep(0, iter)
## set up weights
## the +1 is to add in the intercept/bias parameter
W1 <- matrix(runif(ncol(x_1)*hl[1], -1, 1), nrow = ncol(x_1))
W2 <- matrix(runif((hl[1]+1)*hl[2], -1, 1), nrow = hl[1]+1)
W3 <- matrix(runif((hl[2]+1)*hl[3], -1, 1), nrow = hl[2]+1)
W4 <- matrix(runif((hl[3]+1)*ncol(y), -1, 1), nrow = hl[3]+1)
for(k in 1:iter)
{
# calculate the hidden and output layers using X and hidden layer as inputs
# hidden layer 1 and 2 have a column of ones appended for the bias term
hidden1 <- cbind(matrix(1, nrow = nrow(x_1)), sigm(x_1 %*% W1))
hidden2 <- cbind(matrix(1, nrow = nrow(x_1)), sigm(hidden1 %*% W2))
hidden3 <- cbind(matrix(1, nrow = nrow(x_1)), sigm(hidden2 %*% W3))
y_hat <- sigm(hidden3 %*% W4)
# calculate the gradient and back prop the errors
# see theory above
y_hat_del <- (y-y_hat)*(d.sigm(y_hat))
hidden3_del <- y_hat_del %*% t(W4)*d.sigm(hidden3)
hidden2_del <- hidden3_del[,-1] %*% t(W3)*d.sigm(hidden2)
hidden1_del <- hidden2_del[,-1] %*% t(W2)*d.sigm(hidden1)
# update the weights
W4 <- W4 + lr*t(hidden3) %*% y_hat_del
W3 <- W3 + lr*t(hidden2) %*% hidden3_del[,-1]
W2 <- W2 + lr*t(hidden1) %*% hidden2_del[,-1]
W1 <- W1 + lr*t(x_1) %*% hidden1_del[,-1]
error[k] <- 1/nrow(y)*sum((y-y_hat)^2)
if((k %% (10^4+1)) == 0) cat("mse:", error[k], "\n")
}
# plot loss
xvals <- seq(1, iter, length = 100)
print(qplot(xvals, error[xvals], geom = "line", main = "MSE", xlab = "Iteration"))
There is no error message, but I can't understand how to make a deep NN work for this multivariate regression problem.
In addition, I encoded the y values into a 6-column indicator matrix (spanning the minimum and maximum of the original dataset). Can someone help me understand why it does not converge, and why the final results are all concentrated on column 4?
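The snippet above calls sigm, d.sigm and qplot without defining or loading them; presumably something like the following was intended (these definitions are an editorial assumption, not shown in the original post):
# Assumed helper definitions (editorial addition):
library(ggplot2)                        # provides qplot()
sigm <- function(z) 1 / (1 + exp(-z))   # logistic activation
d.sigm <- function(a) a * (1 - a)       # its derivative, expressed in terms of the activation a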
I wonder if it is possible to efficiently change ncp in the below code such that x becomes .025 and .975 (within rounding error).
x <- pt(q = 5, df = 19, ncp = ?)
----------
Clarification
q = 5 and df = 19 above are just two hypothetical numbers; q and df could be any other two values. What I expect is a function or routine that takes q and df as input.
What is wrong with uniroot?
f <- function (ncp, alpha) pt(q = 5, df = 19, ncp = ncp) - alpha
par(mfrow = c(1,2))
curve(f(ncp, 0.025), from = 5, to = 10, xname = "ncp", main = "0.025")
abline(h = 0)
curve(f(ncp, 0.975), from = 0, to = 5, xname = "ncp", main = "0.975")
abline(h = 0)
So for 0.025 case, the root lies in (7, 8); for 0.975 case, the root lies in (2, 3).
uniroot(f, c(7, 8), alpha = 0.025)$root
#[1] 7.476482
uniroot(f, c(2, 3), alpha = 0.975)$root
#[1] 2.443316
---------
(After some discussion...)
OK, now I see your ultimate goal. You want to implement this equation solver as a function, with input q and df. So they are unknown, but fixed. They might come out of an experiment.
Ideally there would be an analytical solution, i.e., ncp could be written as a formula in terms of q, df and alpha; that would be great. However, this is not possible for the t-distribution.
A numerical solution is the way to go, but uniroot is not a great option for this purpose, as it relies on a plot - view - guess - specify workflow. The answer by loki is an improvement, but still crude: it is a grid search with a fixed step size, starting from a value near 0 (say 0.001), increasing it step by step and checking the approximation error, stopping once the error no longer decreases.
This naturally leads to the idea of numerical optimization with Newton or quasi-Newton methods. In the 1D case we can use the function optimize. It uses a variable step size in its search, so it converges faster than a fixed-step search.
Let's define our function as:
ncp_solver <- function (alpha, q, df) {
## objective function: we minimize squared approximation error
obj_fun <- function (ncp, alpha = alpha, q = q, df = df) {
(pt(q = q, df = df, ncp = ncp) - alpha) ^ 2
}
## now we call `optimize`
oo <- optimize(obj_fun, interval = c(-37.62, 37.62), alpha = alpha, q = q, df = df)
## post processing
oo <- unlist(oo, use.names = FALSE) ## list to numerical vector
oo[2] <- sqrt(oo[2]) ## squared error to absolute error
## return
setNames(oo, c("ncp", "abs.error"))
}
Note that -37.62 / 37.62 are chosen as the lower / upper bounds for ncp, as this is the largest magnitude of ncp supported by the t-distribution functions in R (see ?dt).
For example, let's try this function with q = 5 and df = 19, as given in your question:
ncp_solver(alpha = 0.025, q = 5, df = 19)
# ncp abs.error
#7.476472e+00 1.251142e-07
The result is a named vector, with ncp and absolute approximation error.
Similarly we can do:
ncp_solver(alpha = 0.975, q = 5, df = 19)
# ncp abs.error
#2.443347e+00 7.221928e-07
----------
Follow up
Is it possible for ncp_solver() to take alpha = c(.025, .975) in a single call?
Why not wrap it up for "vectorization":
sapply(c(0.025, 0.975), ncp_solver, q = 5, df = 19)
# [,1] [,2]
#ncp 7.476472e+00 2.443347e+00
#abs.error 1.251142e-07 7.221928e-07
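Equivalently (this wrapper is an editorial addition, not part of the original answer), base R's Vectorize gives a version that accepts a vector of alpha values directly:
# Sketch only (editorial addition): vectorize ncp_solver over alpha.
ncp_solver_vec <- Vectorize(ncp_solver, vectorize.args = "alpha")
ncp_solver_vec(alpha = c(0.025, 0.975), q = 5, df = 19)  # essentially the same 2 x 2 result as the sapply() call above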
How come 0.025 gives the upper bound of the confidence interval, while 0.975 gives the lower bound? Shouldn't this relationship be reversed?
No surprise. By default pt computes the lower-tail probability. If you want the "right" relationship, set lower.tail = FALSE in pt:
ncp_solver <- function (alpha, q, df) {
## objective function: we minimize squared approximation error
obj_fun <- function (ncp, alpha = alpha, q = q, df = df) {
(pt(q = q, df = df, ncp = ncp, lower.tail = FALSE) - alpha) ^ 2
}
## now we call `optimize`
oo <- optimize(obj_fun, interval = c(-37.62, 37.62), alpha = alpha, q = q, df = df)
## post processing
oo <- unlist(oo, use.names = FALSE) ## list to numerical vector
oo[2] <- sqrt(oo[2]) ## squared error to absolute error
## return
setNames(oo, c("ncp", "abs.error"))
}
Now you see:
ncp_solver(0.025, 5, 19)[[1]] ## use "[[" not "[" to drop name
#[1] 2.443316
ncp_solver(0.975, 5, 19)[[1]]
#[1] 7.476492
--------
Bug report and fix
It was reported to me that the ncp_solver above is unstable. For example:
ncp_solver(alpha = 0.025, q = 0, df = 98)
# ncp abs.error
#-8.880922 0.025000
But on the other hand, if we double check with uniroot here:
f <- function (ncp, alpha) pt(q = 0, df = 98, ncp = ncp, lower.tail = FALSE) - alpha
curve(f(ncp, 0.025), from = -3, to = 0, xname = "ncp"); abline(h = 0)
uniroot(f, c(-2, -1.5), 0.025)$root
#[1] -1.959961
So there is clearly something wrong with ncp_solver.
It turns out that we cannot use bounds as wide as c(-37.62, 37.62); if we narrow them to c(-35, 35), everything is fine.
Also, to avoid tolerance problems, we can change the objective function from squared error to absolute error:
ncp_solver <- function (alpha, q, df) {
## objective function: we minimize absolute approximation error
obj_fun <- function (ncp, alpha = alpha, q = q, df = df) {
abs(pt(q = q, df = df, ncp = ncp, lower.tail = FALSE) - alpha)
}
## now we call `optimize`
oo <- optimize(obj_fun, interval = c(-35, 35), alpha = alpha, q = q, df = df)
## post processing and return
oo <- unlist(oo, use.names = FALSE) ## list to numerical vector
setNames(oo, c("ncp", "abs.error"))
}
ncp_solver(alpha = 0.025, q = 0, df = 98)
# ncp abs.error
#-1.959980e+00 9.190327e-07
Damn, this is a pretty annoying bug. But relax now.
Report on getting warning messages from pt
I have also received reports about annoying warning messages from pt:
ncp_solver(0.025, -5, 19)
# ncp abs.error
#-7.476488e+00 5.760562e-07
#Warning message:
#In pt(q = q, df = df, ncp = ncp, lower.tail = FALSE) :
# full precision may not have been achieved in 'pnt{final}'
I am not entirely sure what is going on here, but so far I have not observed any misleading results. Therefore, I decided to suppress those warnings from pt using suppressWarnings:
ncp_solver <- function (alpha, q, df) {
## objective function: we minimize absolute approximation error
obj_fun <- function (ncp, alpha = alpha, q = q, df = df) {
abs(suppressWarnings(pt(q = q, df = df, ncp = ncp, lower.tail = FALSE)) - alpha)
}
## now we call `optimize`
oo <- optimize(obj_fun, interval = c(-35, 35), alpha = alpha, q = q, df = df)
## post processing and return
oo <- unlist(oo, use.names = FALSE) ## list to numerical vector
setNames(oo, c("ncp", "abs.error"))
}
ncp_solver(0.025, -5, 19)
# ncp abs.error
#-7.476488e+00 5.760562e-07
OK, quiet now.
You could use two while loops like this:
i <- 0.001
lowerFound <- FALSE
while(!lowerFound){
x <- pt(q = 5, df = 19, ncp = i)
if (round(x, 3) == 0.025){
lowerFound <- TRUE
print(paste("Lower is", i))
lower <- i
} else {
i <- i + 0.0005
}
}
i <- 0.001
upperFound <- FALSE
while(!upperFound){
x <- pt(q = 5, df = 19, ncp = i)
if (round(x, 3) == 0.975){
upperFound <- TRUE
print(paste("Upper is ", i))
upper <- i
} else {
i <- i + 0.0005
}
}
c(Lower = lower, Upper = upper)
# Lower Upper
# 7.4655 2.4330
Of course, you can adapt the increment in i <- i + ... or change the check in if (round(x, ...) == ...) to fit your specific accuracy needs.
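A vectorized variant of the same grid-search idea (an editorial addition, not part of the original answer) evaluates pt over a whole grid of ncp values in one call and picks the closest match:
# Sketch only (editorial addition): vectorized grid search for q = 5, df = 19.
ncp_grid <- seq(0, 15, by = 1e-4)
p_grid <- pt(q = 5, df = 19, ncp = ncp_grid)
ncp_grid[which.min(abs(p_grid - 0.025))]  # ncp giving pt close to 0.025 (about 7.476, matching uniroot above)
ncp_grid[which.min(abs(p_grid - 0.975))]  # ncp giving pt close to 0.975 (about 2.443)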
I know this is an old question, but there is now a one-line solution to this problem using the conf.limits.nct() function in the MBESS package.
install.packages("MBESS")
library(MBESS)
result <- conf.limits.nct(t.value = 5, df = 19)
result
$Lower.Limit
[1] 2.443332
$Prob.Less.Lower
[1] 0.025
$Upper.Limit
[1] 7.476475
$Prob.Greater.Upper
[1] 0.025
$Lower.Limit is the result where pt = 0.975
$Upper.Limit is the result where pt = 0.025
pt(q = 5, df = 19, ncp = result$Lower.Limit)
[1] 0.975
pt(q = 5, df = 19, ncp = result$Upper.Limit)
[1] 0.025
I have some points, and both coordinates of each point have variances. They are stored in arrays (just an example):
x <- c(1, 2, 3, 4, 5)
y <- c(1, 2, 3, 4, 5)
dx <- c(0.1, 0.1, 0.1, 0.1, 0.1)
dy <- c(0.1, 0.1, 0.1, 0.1, 0.1)
and each point's coordinates are (x +/- dx, y +/- dy).
I want to fit the line y = k*x and obtain the result k +/- dk.
Terry Therneau answered this on rhelp earlier this year, citing a 1987 paper by Prof. Ripley:
Besides "total least squares" it is also call Deming regression and orthogonal regression:
Rhelp text at Baron's R Search page
# Generalized Deming regression, based on Ripley, Analyst, 1987:377-383.
#
deming <- function(x, y, xstd, ystd, jackknife=TRUE, dfbeta=FALSE,
scale=TRUE) {
Call <- match.call()
n <- length(x)
if (length(y) !=n) stop("x and y must be the same length")
if (length(xstd) != length(ystd))
stop("xstd and ystd must be the same length")
# Do missing value processing
nafun <- get(options()$na.action)
if (length(xstd)==n) {
tdata <- nafun(data.frame(x=x, y=y, xstd=xstd, ystd=ystd))
x <- tdata$x
y <- tdata$y
xstd <- tdata$xstd
ystd <- tdata$ystd
}
else {
tdata <- nafun(data.frame(x=x, y=y))
x <- tdata$x
y <- tdata$y
if (length(xstd) !=2) stop("Wrong length for std specification")
xstd <- xstd[1] + xstd[2]*x
ystd <- ystd[1] + ystd[2] * y
}
if (any(xstd <=0) || any(ystd <=0)) stop("Std must be positive")
minfun <- function(beta, x, y, xv, yv) {
w <- 1/(yv + beta^2*xv)
alphahat <- sum(w * (y - beta*x))/ sum(w)
sum(w*(y-(alphahat + beta*x))^2)
}
minfun0 <- function(beta, x, y, xv, yv) {
w <- 1/(yv + beta^2*xv)
alphahat <- 0 #constrain to zero
sum(w*(y-(alphahat + beta*x))^2)
}
afun <-function(beta, x, y, xv, yv) {
w <- 1/(yv + beta^2*xv)
sum(w * (y - beta*x))/ sum(w)
}
fit <- optimize(minfun, c(.1, 10), x=x, y=y, xv=xstd^2, yv=ystd^2)
coef = c(intercept=afun(fit$minimum, x, y, xstd^2, ystd^2),
slope=fit$minimum)
fit0 <- optimize(minfun0, coef[2]*c(.5, 1.5), x=x, y=y,
xv=xstd^2, yv=ystd^2)
w <- 1/(ystd^2 + (coef[2]*xstd)^2) #weights
u <- w*(ystd^2*x + xstd^2*coef[2]*(y-coef[1])) #imputed "true" value
if (is.logical(scale) && scale) {
err1 <- (x-u)/ xstd
err2 <- (y - (coef[1] + coef[2]*u))/ystd
sigma <- sum(err1^2 + err2^2)/(n-2)
# Ripley's paper has err = [y - (a + b*x)] * sqrt(w); gives the same SS
}
else sigma <- scale^2
test1 <- (coef[2] -1)*sqrt(sum(w *(x-u)^2)/sigma) #test for beta=1
test2 <- coef[1]*sqrt(sum(w*x^2)/sum(w*(x-u)^2) /sigma) #test for a=0
rlist <- list(coefficient=coef, test1=test1, test0=test2, scale=sigma,
err1=err1, err2=err2, u=u)
if (jackknife) {
delta <- matrix(0., nrow=n, ncol=2)
for (i in 1:n) {
fit <- optimize(minfun, c(.5, 1.5)*coef[2],
x=x[-i], y=y[-i], xv=xstd[-i]^2, yv=ystd[-i]^2)
ahat <- afun(fit$minimum, x[-i], y[-i], xstd[-i]^2, ystd[-i]^2)
delta[i,] <- coef - c(ahat, fit$minimum)
}
rlist$variance <- t(delta) %*% delta
if (dfbeta) rlist$dfbeta <- delta
}
rlist$call <- Call
class(rlist) <- 'deming'
rlist
}
print.deming <- function(x, ...) {
cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
if (is.null(x$variance)) {
table <- matrix(0., nrow=2, ncol=3)
table[,1] <- x$coefficient
table[,2] <- c(x$test0, x$test1)
table[,3] <- pnorm(-2*abs(table[,2]))
dimnames(table) <- list(c("Intercept", "Slope"),
c("Coef", "z", "p"))
}
else {
table <- matrix(0., nrow=2, ncol=4)
table[,1] <- x$coefficient
table[,2] <- sqrt(diag(x$variance))
table[,3] <- c(x$test0, x$test1)
table[,4] <- pnorm(-2*abs(table[,3]))
dimnames(table) <- list(c("Intercept", "Slope"),
c("Coef", "se(coef)", "z", "p"))
}
print(table, ...)
cat("\n Scale=", format(x$scale, ...), "\n")
invisible(x)
}
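For the data in the question, a usage sketch (an editorial addition, not part of the original rhelp post) would look like the following; note that this function fits an intercept as well, whereas the question asks for y = k*x, and that the toy data lie exactly on a line, so the error estimates will be essentially zero:
# Sketch only (editorial addition): apply deming() to the example data.
fit <- deming(x, y, xstd = dx, ystd = dy)
k <- fit$coefficient[["slope"]]
dk <- sqrt(fit$variance[2, 2])  # jackknife variance estimate for the slope, as returned above
c(k = k, dk = dk)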
You're looking to perform a total least squares fit. There's a whole book on this, "The Total Least Squares Problem: Computational Aspects and Analysis" by Sabine Van Huffel and Joos Vandewalle. Wikipedia's article should provide enough for you to code up a solution; it's basically "take the SVD of a slightly augmented system".
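A minimal sketch of that SVD approach for the no-intercept model y = k*x (an editorial illustration that assumes equal, independent errors in x and y, so it ignores the per-point variances dx and dy):
# Sketch only (editorial addition): total least squares slope via the SVD of [x  y].
tls_slope <- function(x, y) {
s <- svd(cbind(x, y))
v <- s$v[, ncol(s$v)]  # right singular vector for the smallest singular value
-v[1] / v[2]           # TLS slope
}
tls_slope(x, y)  # for the toy data above this is essentially 1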