Fitting a Laplace distribution to data in R

I want to fit a Laplace distribution to data. Its density is given by the formula f(x | mu, b) = 1/(2b) * exp(-|x - mu| / b).
As I read on Wikipedia, a good estimator for the mu parameter is the median, and for tau the mean absolute deviation from the median.
So this is what I did:
set.seed(42)
# Create a vector for which Laplace distribution will be fitted
vec <- rexp(1000)
# Defining laplace distribution
dlaplace <- function(x, mu, b) {
1/(2*b)*exp(-(abs(x - mu))/b)
}
#Estimating two parameters
mu <- median(vec)
tau <- mean(abs(vec-mu))
However, if we now take a look at a histogram of this density fitted to our data, we end up with the following image:
library(ggplot2)
vals <- dlaplace(vec, mu, tau)
ggplot() + geom_histogram(aes(vals), binwidth = 3) +
geom_line(aes(x = 1:length(vec), y = vec))
This suggests that it doesn't fit the distribution at all. My question is:
Is the fit this bad because I drew my vector from an exponential distribution, which is not Laplace, or am I doing something incorrectly?

Are you looking for this?
vals <- dlaplace(vec, mu, tau)
df1 <- data.frame(vec, vals)
ggplot(df1, aes(vec)) +
geom_histogram(aes(y = ..density..), fill = "grey", binwidth = 0.5) +
geom_line(aes(y = vals), color = "steelblue")
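A small variation on the same idea, just a sketch reusing df1, mu, and tau from above: stat_function() evaluates the fitted density on a regular grid, so you don't have to pre-compute vals.
# sketch: overlay the fitted Laplace density with stat_function()
ggplot(df1, aes(vec)) +
  geom_histogram(aes(y = ..density..), fill = "grey", binwidth = 0.5) +
  stat_function(fun = dlaplace, args = list(mu = mu, b = tau), color = "steelblue")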

Related

Unable to plot confidence intervals using ggplot, (geom_ribbon() argument)

I am trying to plot 95% confidence intervals on some simulated values, but am running into some issues when trying to plot the CIs using geom_ribbon(). The trouble I'm having is that my plot does not show the CIs, like so:
I have included all of my code below in case anyone can spot where I have gone wrong:
set.seed(20220520)
#simulating 200 values between 0 and 1 from a uniform distribution
x = runif(200, min = 0, max = 1)
lam = exp(0.3+5*x)
y = rpois(200, lambda = lam)
#before we do this each Yi may contain zeros so we need to add a small constant
y <- y + .1
#combining x and y into a dataframe so we can plot
df = data.frame(x, y)
#fitting a Poisson GLM
model2 <- glm(y ~ x,
data = df,
family = poisson(link='log'))
#make predictions (this may be the same as predictions_mod2)
preds <- predict(model2, type = "response")
#making CI predictions
predictions_mod2 = predict(model2, df, se.fit = TRUE, type = 'response')
#calculate confidence intervals limit
upper_mod2 = predictions_mod2$fit+1.96*predictions_mod2$se.fit
lower_mod2 = predictions_mod2$fit-1.96*predictions_mod2$se.fit
#transform the CI limit to get one at the level of the mean
upper_mod2 = exp(upper_mod2)/(1+exp(upper_mod2))
lower_mod2 = exp(lower_mod2)/(1+exp(lower_mod2))
#combining into a df
predframe = data.frame(lwr=lower_mod2,upr=upper_mod2, x = df$x, y = df$y)
#plot model with 95% confidence intervals using ggplot
ggplot(df, aes(x, y)) +
geom_ribbon(data = predframe, aes(ymin=lwr, ymax=upr), alpha = 0.4) +
geom_point() +
geom_line(aes(x, preds), col = 'blue')
In a comment to the question, it was asked why not to logit-transform the predicted values. The reason is that the type of prediction requested is "response", so the returned values are already on the scale of the response variable. From the documentation (my emphasis):
type
the type of prediction required. The default is on the scale of the linear predictors; the alternative "response" is on the scale of the response variable. Thus for a default binomial model the default predictions are of log-odds (probabilities on logit scale) and type = "response" gives the predicted probabilities. The "terms" option returns a matrix giving the fitted values of each term in the model formula on the linear predictor scale.
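To make the documentation concrete for this model, a quick sketch reusing model2 from the question: with a log link, the "response"-scale fit is simply exp() of the linear predictor, so those values are already on the scale of y and should not be pushed through exp(u)/(1 + exp(u)).
# sketch: response-scale predictions equal exp() of the link-scale predictions
all.equal(predict(model2, type = "response"),
          exp(predict(model2, type = "link")))
# should be TRUE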
A good way to answer is to show the code. Here is the question's code again, with the inverse-logit transformation of the interval limits removed:
library(ggplot2, quietly = TRUE)
set.seed(20220520)
#simulating 200 values between 0 and 1 from a uniform distribution
x = runif(200, min = 0, max = 1)
lam = exp(0.3+5*x)
y = rpois(200, lambda = lam)
#before we do this each Yi may contain zeros so we need to add a small constant
y <- y + 0.1
#combining x and y into a dataframe so we can plot
df = data.frame(x, y)
#fitting a Poisson GLM
suppressWarnings(
model2 <- glm(y ~ x,
data = df,
family = poisson(link='log'))
)
#make predictions (this may be the same as predictions_mod2)
preds <- predict(model2, type = "response")
#making CI predictions
predictions_mod2 = predict(model2, df, se.fit = TRUE, type = 'response')
#calculate confidence intervals limit
upper_mod2 = predictions_mod2$fit+1.96*predictions_mod2$se.fit
lower_mod2 = predictions_mod2$fit-1.96*predictions_mod2$se.fit
#combining into a df
predframe = data.frame(lwr=lower_mod2,upr=upper_mod2, x = df$x, y = df$y)
#plot model with 95% confidence intervals using ggplot
ggplot(df, aes(x, y)) +
geom_ribbon(data = predframe, aes(ymin=lwr, ymax=upr), alpha = 0.4) +
geom_point() +
geom_line(aes(x, preds), col = 'blue')
Created on 2022-05-29 by the reprex package (v2.0.1)
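As a side note, not part of the answer above but a common alternative, here is a sketch: if you want intervals that stay positive like the Poisson mean, build them on the link scale and then exponentiate.
# sketch: intervals built on the link scale, then mapped back with exp()
pred_link <- predict(model2, df, se.fit = TRUE, type = "link")
predframe_link <- data.frame(
  x = df$x,
  y = df$y,
  fit = exp(pred_link$fit),
  lwr = exp(pred_link$fit - 1.96 * pred_link$se.fit),
  upr = exp(pred_link$fit + 1.96 * pred_link$se.fit)
)
ggplot(df, aes(x, y)) +
  geom_ribbon(data = predframe_link, aes(ymin = lwr, ymax = upr), alpha = 0.4) +
  geom_point() +
  geom_line(data = predframe_link, aes(x, fit), col = 'blue')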

Computing profile confidence intervals for a regression line

Simulate some data
n <- 50
beta0 <- 2
beta1 <- 0.32
x <- runif(n=n, min=0, max=5)
mu <- exp(beta0 + beta1 * x)
y <- rpois(n=n, lambda=mu)
data <- data.frame(x=x, y=y)
plot(data$x, data$y)
Apply a Poisson regression
glm.pois <- glm(formula= y ~ x, data=data, family=poisson(link="log"))
summary(glm.pois)
Compute profile 95% CI
(pCI <- confint(glm.pois))
nice.xs <- sort(data$x)
pred.pCI <- apply(t(pCI), 1, FUN=function(x){exp( x[1]+x[2]*nice.xs)})
Compute 95% CI using the SE
library(dplyr)    # bind_cols(), mutate()
library(tibble)   # as_tibble()
library(ggplot2)
new.dat <- data.frame(phat = predict(glm.pois, type="response"),
x = data$x)
new.dat <- new.dat[with(new.dat, order(x)), ]
ilink <- poisson()$linkinv
## Add fit and se.fit on the link scale
new.dat <- bind_cols(new.dat,
setNames(as_tibble(predict(glm.pois, new.dat,
se.fit = TRUE)[1:2]),
c('fit_link','se_link')))
new.dat <- mutate(new.dat,
fit = ilink(fit_link),
right_upr = ilink(fit_link + (2 * se_link)),
right_lwr = ilink(fit_link - (2 * se_link)))
Plot
ggplot(data=new.dat, aes(x=x, y= phat)) +
geom_point() +
geom_ribbon(aes(x=x, ymin =right_lwr, ymax = right_upr),
fill = "darkgreen", alpha=0.3) +
geom_ribbon(aes(x=nice.xs, ymin = pred.pCI[,1], ymax = pred.pCI[,2]),
fill = "darkorange", alpha=0.3) +
theme_classic(base_size=15)
My question is why are the profile 95% CI (orange ribbon) so large? I imagine I am computing them wrong but I don't know where my mistake is.
There is a very strong correlation between the intercept and slope:
cov2cor(vcov(glm.pois))
            (Intercept)          x
(Intercept)   1.0000000 -0.9147526
x            -0.9147526  1.0000000
Your profile CIs, which are computed as exp(intercept_lwr + slope_lwr*x) and exp(intercept_upr + slope_upr*x), ignore this correlation.
It's not easy to compute profile confidence intervals on derived values (i.e., on quantities other than the parameters themselves).
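One way to get bands for the fitted curve that do respect this correlation is sketched below; note these are Wald-type simulation intervals drawn from the multivariate normal approximation to the coefficients, not profile intervals.
library(MASS)  # for mvrnorm
set.seed(1)
# draw coefficient pairs, propagate each draw through the inverse link, take quantiles
sims <- mvrnorm(5000, mu = coef(glm.pois), Sigma = vcov(glm.pois))
curves <- apply(sims, 1, function(b) exp(b[1] + b[2] * nice.xs))
band <- t(apply(curves, 1, quantile, probs = c(0.025, 0.975)))
head(band)  # lower/upper limit for each value of nice.xs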

How to find x value for OR = 1 in logit crude and adjusted GAM

I've some data for fitting crude and adjusted logit GAMs:
library(mgcv)
## Simulate some data...
set.seed(3);n<-400
dat <- gamSim(1,n=n)
mu <- binomial()$linkinv(dat$f/4-2)
phi <- .5
a <- mu*phi;b <- phi - a;
dat$y <- rbeta(n,a,b)
## Fitting GAMs
crude <- gam(y~s(x0),family=binomial(link="logit"),data=dat)
adj <- gam(y~s(x0)+s(x1)+s(x2)+s(x3),family=binomial(link="logit"),data=dat)
Now I would like to find the value of x0 at which the odds ratio (OR) equals 1.00 (i.e. probability 0.50). For this purpose I use visreg with the argument plot = FALSE.
## Prepare data for ggplotting
library(visreg)
p.crude <- visreg(crude, "x0", plot = FALSE)
p.adj <- visreg(adj, "x0", plot = FALSE)
library(dplyr)
bind_rows(
mutate(p.crude$fit, Model = "crude"),
mutate(p.adj$fit, Model = "adj")
) -> fits
OK, now I am going to compute ORs from the log ORs. Is the following code correct?
# Compute ORs and CI from LogOR
fits$or <- exp(fits$visregFit)
fits$ci.low <- exp(fits$visregLwr)
fits$ci.up <- exp(fits$visregUpr)
Now I use approx to interpolate the x0 value at which OR = 1.00:
## Interpolate x0 which give OR 1.00 (or 50% of probability)
x.crude <- round(approx(x = crude$fitted.values, y=crude$model$x0, xout = .5)$y, 1)
x.adj <- round(approx(x = adj$fitted.values, y=adj$model$x0, xout = .5)$y, 1)
Finally, I'm plotting the two models in a single graph:
## Plotting using ggplot
library(ggplot2)
ggplot(data = fits) +
geom_vline(aes(xintercept = x.crude), size=.2, color="black")+
geom_vline(aes(xintercept = x.adj), size=.2, color="red")+
annotate(geom ="text", x= x.crude - 0.05, y=.5, label = x.crude, size=3.5) +
annotate(geom ="text", x= x.adj - 0.05, y=.5, label = x.adj, size=3.5, color="red") +
geom_ribbon(aes(x0, ymin=ci.low, ymax=ci.up, group=Model, fill=Model), alpha=.05) +
geom_line(aes(x0, or, group=Model, color=Model)) +
labs(x="X0", y="Odds ratio")+
theme_bw(16)
As you can see, only the crude model's curve crosses OR ≈ 1.00 (at x0 = 0.9), while this never happens for the adj model.
First, how can I get an interpolation at exactly OR = 1?
Second, with my limited statistical knowledge, I understood that I should also observe a crossing at OR = 1 for the adj model, based on the observed values of x0 under this model. Why is its curve shifted upwards?
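One possible sketch for the first question, using the visreg fits computed above: interpolate on the log-OR scale, where OR = 1 corresponds to visregFit = 0 (x_at_or1 below is just a hypothetical helper name).
# sketch: find the x0 where the partial log-odds fit crosses 0 (i.e. OR = 1)
x_at_or1 <- function(fit_df) approx(x = fit_df$visregFit, y = fit_df$x0, xout = 0)$y
x_at_or1(p.crude$fit)
x_at_or1(p.adj$fit)   # returns NA if the adjusted fit never crosses 0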

ggplot2: How to plot an orthogonal regression line?

I have tested a large sample of participants on two different tests of visual perception – now, I'd like to see to what extent performance on both tests correlates.
To visualise the correlation, I plot a scatterplot in R using ggplot() and I fit a regression line (using stat_smooth()). However, since both my x and y variable are performance measures, I need to take both of them into account when fitting my regression line – thus, I cannot use a simple linear regression (using stat_smooth(method="lm")), but rather need to fit an orthogonal regression (or Total least squares). How would I go about doing this?
I know I can specify formula in stat_smooth(), but I wouldn't know what formula to use. From what I understand, none of the preset methods (lm, glm, gam, loess, rlm) are applicable.
It turns out that you can extract the slope and intercept from principal components analysis on (x,y), as shown here. This is just a little simpler, runs in base R, and gives the identical result to using Deming(...) in MethComp.
# same `x` and `y` as in #user20650's answer
df <- data.frame(y, x)
pca <- prcomp(~x+y, df)
slp <- with(pca, rotation[2,1] / rotation[1,1])
int <- with(pca, center[2] - slp*center[1])
ggplot(df, aes(x,y)) +
geom_point() +
stat_smooth(method=lm, color="green", se=FALSE) +
geom_abline(slope=slp, intercept=int, color="blue")
Caveat: not familiar with this method
I think you should be able to just pass the slope and intercept to geom_abline to produce the fitted line. Alternatively, you could define your own method to pass to stat_smooth (as shown at the link smooth.Pspline wrapper for stat_smooth (in ggplot2)). I used the Deming function from the MethComp package as suggested at link How to calculate Total least squares in R? (Orthogonal regression).
library(MethComp)
library(ggplot2)
# Sample data and model (from ?Deming example)
set.seed(1)
M <- runif(100,0,5)
# Measurements:
x <- M + rnorm(100)
y <- 2 + 3 * M + rnorm(100,sd=2)
# Deming regression
mod <- Deming(x,y)
# Define functions to pass to stat_smooth - see mnel's answer at link for details
# Defined the Deming model output as class Deming to define the predict method
# I only used the intercept and slope for predictions - is this correct?
f <- function(formula,data,SDR=2,...){
M <- model.frame(formula, data)
d <- Deming(x =M[,2],y =M[,1], sdr=SDR)[1:2]
class(d) <- "Deming"
d
}
# an s3 method for predictdf (called within stat_smooth)
predictdf.Deming <- function(model, xseq, se, level) {
pred <- model %*% t(cbind(1, xseq) )
data.frame(x = xseq, y = c(pred))
}
ggplot(data.frame(x,y), aes(x, y)) + geom_point() +
stat_smooth(method = f, se= FALSE, colour='red', formula=y~x, SDR=1) +
geom_abline(intercept=mod[1], slope=mod[2], colour='blue') +
stat_smooth(method = "lm", se= FALSE, colour='green', formula = y~x)
Passing the intercept and slope to geom_abline produces the same fitted line (as expected), so if this is the correct approach then IMO it's easier to go with this.
The MethComp package seems to no longer be maintained (it was removed from CRAN).
Russel88/COEF allows you to use stat_/geom_smooth with method = "tls" to add an orthogonal regression line.
Based on this and wikipedia:Deming_regression, I created the following functions, which allow noise ratios other than 1:
deming.fit <- function(x, y, noise_ratio = sd(y)/sd(x)) {
  if(missing(noise_ratio) || is.null(noise_ratio)) noise_ratio <- eval(formals(sys.function(0))$noise_ratio) # this is just a complicated way to write `sd(y)/sd(x)`
  delta <- noise_ratio^2
  x_name <- deparse(substitute(x))
  s_yy <- var(y)
  s_xx <- var(x)
  s_xy <- cov(x, y)
  beta1 <- (s_yy - delta*s_xx + sqrt((s_yy - delta*s_xx)^2 + 4*delta*s_xy^2)) / (2*s_xy)
  beta0 <- mean(y) - beta1 * mean(x)
  res <- c(beta0 = beta0, beta1 = beta1)
  names(res) <- c("(Intercept)", x_name)
  class(res) <- "Deming"
  res
}
deming <- function(formula, data, R = 100, noise_ratio = NULL, ...){
  ret <- boot::boot(
    data = model.frame(formula, data),
    statistic = function(data, ind) {
      data <- data[ind, ]
      args <- rlang::parse_exprs(colnames(data))
      names(args) <- c("y", "x")
      rlang::eval_tidy(rlang::expr(deming.fit(!!!args, noise_ratio = noise_ratio)), data, env = rlang::current_env())
    },
    R = R
  )
  class(ret) <- c("Deming", class(ret))
  ret
}
predictdf.Deming <- function(model, xseq, se, level) {
  pred <- as.vector(tcrossprod(model$t0, cbind(1, xseq)))
  if(se) {
    preds <- tcrossprod(model$t, cbind(1, xseq))
    data.frame(
      x = xseq,
      y = pred,
      ymin = apply(preds, 2, function(x) quantile(x, probs = (1-level)/2)),
      ymax = apply(preds, 2, function(x) quantile(x, probs = 1-((1-level)/2)))
    )
  } else {
    return(data.frame(x = xseq, y = pred))
  }
}
# unrelated helper function to create a nicer plot:
fix_plot_limits <- function(p) {
  p + coord_cartesian(
    xlim = ggplot_build(p)$layout$panel_params[[1]]$x.range,
    ylim = ggplot_build(p)$layout$panel_params[[1]]$y.range
  )
}
Demonstration:
library(ggplot2)
#devtools::install_github("Russel88/COEF")
library(COEF)
fix_plot_limits(
ggplot(data.frame(x = (1:5) + rnorm(100), y = (1:5) + rnorm(100)*2), mapping = aes(x=x, y=y)) +
geom_point()
) +
geom_smooth(method=deming, aes(color="deming"), method.args = list(noise_ratio=2)) +
geom_smooth(method=lm, aes(color="lm")) +
geom_smooth(method = COEF::tls, aes(color="tls"))
Created on 2019-12-04 by the reprex package (v0.3.0)
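For a quick check outside ggplot, here is a sketch that calls the deming.fit() defined above directly, on the same kind of data as the demonstration; with noise_ratio = 1 this is plain orthogonal (total least squares) regression.
# sketch: direct call to deming.fit()
set.seed(1)
x <- (1:5) + rnorm(100)
y <- (1:5) + rnorm(100) * 2
deming.fit(x, y, noise_ratio = 1)   # intercept and slope of the TLS line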
For anyone who is interested, I validated jhoward's solution against the deming::deming() function, as I was not familiar with jhoward's method of extracting the slope and intercept using PCA. They indeed produce identical results. Reprex is:
# Sample data and model (from ?Deming example)
set.seed(1)
M <- runif(100,0,5)
# Measurements:
x <- M + rnorm(100)
y <- 2 + 3 * M + rnorm(100,sd=2)
# Make data.frame()
df <- data.frame(x,y)
# Get intercept and slope using deming::deming()
library(deming)
mod_Dem <- deming::deming(y~x,df)
slp_Dem <- mod_Dem$coefficients[2]
int_Dem <- mod_Dem$coefficients[1]
# Get intercept and slope using jhoward's method
pca <- prcomp(~x+y, df)
slp_jhoward <- with(pca, rotation[2,1] / rotation[1,1])
int_jhoward <- with(pca, center[2] - slp_jhoward*center[1])
# Plot both orthogonal regression lines and simple linear regression line
library(ggplot2)
ggplot(df, aes(x,y)) +
geom_point() +
stat_smooth(method=lm, color="green", se=FALSE) +
geom_abline(slope=slp_jhoward, intercept=int_jhoward, color="blue", lwd = 3) +
geom_abline(slope=slp_Dem, intercept=int_Dem, color = "white", lwd = 2, linetype = 3)
Interestingly, if you switch the order of x and y in the models (i.e., to mod_Dem <- deming::deming(x~y,df) and pca <- prcomp(~y+x, df)), you get completely different slopes.
My (very superficial) understanding of orthogonal regression was that it does not treat either variable as independent or dependent, and thus that the regression line should be unaffected by how the model is specified, e.g., as y~x vs x~y. Clearly I was very much mistaken, and I would be interested to hear anyone's thoughts about exactly why I was so wrong.

R: predict.glm equivalent for MCMCpack::MCMClogit

I am running a Bayesian logit with MCMCpack::MCMClogit. The syntax is easy and follows lm() or glm(), but I can't find any equivalent of the predict.glm function. Is there any way of predicting the probabilities of the outcomes in MCMClogit for each observation in the dataframe? predict() is especially useful for validating a model fit on training data against new data, which is what I ultimately have to do.
df = read.csv("http://dl.dropbox.com/u/1791181/MCMC.csv")#Read in data
model.glm = glm(SECONDARY.LEVEL ~ AGE + SEX, data=df, family=binomial(link=logit))
glm.predict = predict(model.glm, type="response")
For MCMClogit():
model.mcmc = MCMClogit(SECONDARY.LEVEL ~ AGE + SEX, data=df, mcmc=1000)
You could use the posterior distribution of model parameters produced by MCMC to get a distribution of predictions, using the logistic function.
For instance, if your model formula is y ~ x1 + x2 + x3, and your MCMC output is stored in the variable posterior.mcmc, then you could use
function(x1, x2, x3) 1 / (1 + exp(-posterior.mcmc %*% rbind(1, x1, x2, x3)))
to give the distribution analogous to predict.glm(., 'response')
More detailed example for the case of a single input variable:
library(extraDistr)
library(MCMCpack)
# Take x uniformly distributed between -100 and 100
x <- runif(2000, min=-100, max=100)
# Generate a response which is logistic with some noise
beta <- 1/8
eps <- rnorm(length(x), 0, 1)
p <- function(x, eps) 1 / (1 + exp(-beta*x + eps))
p.x <- p(x, eps)
y <- sapply(p.x, function(p) rbern(1, p))
df1 <- data.frame(x, y)
# Fit by logistic regression
glm.logistic <- glm(y ~ x, df1, family=binomial)
# MCMC gives a distribution of values for the model parameters
posterior.mcmc <- MCMClogit(y ~ x, df1, verbose=2000)
densplot(posterior.mcmc)
# Thus, we have a distribution of model predictions for each x
predict.p.mcmc <- function(x) 1 / (1 + exp(-posterior.mcmc %*% rbind(1,x)))
interval.p.mcmc <- function(x, low, high) apply(predict.p.mcmc(x), 2,
function(x) quantile(x, c(low, high)))
predict.y.mcmc <- function(x) posterior.mcmc %*% rbind(1,x)
interval.y.mcmc <- function(x, low, high) apply(predict.y.mcmc(x), 2,
function(x) quantile(x, c(low, high)))
## Plot the data and fits ##
plot(x, p.x, ylab = 'Pr(y=1)', pch = 20, cex = 0.5, main = 'Probability vs x')
# x-values for prediction
x_test <- seq(-100, 100, 0.01)
# Blue line is the logistic function we used to generate the data, with noise removed
p_of_x_test <- p(x_test, 0)
lines(x_test, p_of_x_test, col = 'blue')
# Green line is the prediction from logistic regression
lines(x_test, predict(glm.logistic, data.frame(x = x_test), 'response'), col = 'green')
# Red lines indicates the range of model predictions from MCMC
# (for each x, 95% of the distribution of model predictions lies between these bounds)
interval.p.mcmc_95 <- interval.p.mcmc(x_test, 0.025, 0.975)
lines(x_test, interval.p.mcmc_95[1,], col = 'red')
lines(x_test, interval.p.mcmc_95[2,], col = 'red')
# Similarly for the log-odds
plot(x, log(p.x/(1 - p.x)), ylab = 'log[Pr(y=1) / (1 - Pr(y=1))]',
pch = 20, cex = 0.5, main = 'Log-Odds vs x')
lines(x_test, log(p_of_x_test/(1 - p_of_x_test)), col = 'blue')
lines(x_test, predict(glm.logistic, data.frame(x = x_test)), col = 'green')
interval.y.mcmc_95 <- interval.y.mcmc(x_test, 0.025, 0.975)
lines(x_test, interval.y.mcmc_95[1,], col = 'red')
lines(x_test, interval.y.mcmc_95[2,], col = 'red')
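To get a single predicted probability per observation, analogous to predict.glm(., 'response'), here is a sketch using the objects above: push each row's covariates through every posterior draw and summarise, for example with the posterior mean.
# sketch: per-observation posterior predictive probabilities
X <- model.matrix(~ x, data = df1)                    # one row per observation
p.draws <- 1 / (1 + exp(-posterior.mcmc %*% t(X)))    # draws x observations
p.hat <- colMeans(p.draws)                            # posterior mean probability per row
head(p.hat)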
The description of the function says:
This function generates a sample from the posterior distribution of a logistic regression model using a random walk Metropolis algorithm.
I think therefore that your model.mcmc already contains the points that MCMClogit() has simulated.
You can use str() to see what it contains, and call summary() and plot() on it as in the examples here: http://cran.r-project.org/web/packages/MCMCpack/MCMCpack.pdf
