Calculate y value for distribution fitting functions in R

I am plotting curves for different distribution functions and I need to know the highest y-value for each curve. Later I will plot only the one curve that is selected as the best fit.
This is the function (it is a bit hard-coded, I am working on it):
library(plyr)
library(dplyr)
library(fitdistrplus)
library(evd)
library(gamlss)
fdistr <- function(d) {
# Uncomment to try run line by line
# d <- data_to_plot
TLT <- d$TLT
if (any(TLT <= 0)) {TLT[TLT <= 0] <- 0.001} # replace values <= 0 for log calculation
gev <- fgev(TLT, std.err=FALSE)
distr <- c('norm', 'lnorm', 'weibull', 'gamma')
fit <- lapply(X=distr, FUN=fitdist, data=TLT)
fit[[5]] <- gev
distr[5] <- 'gev'
names(fit) <- distr
Loglike <- sapply(X=fit, FUN=logLik)
Loglike_Best <- which(Loglike == max(Loglike))
# Uncomment to try run line by line
# max <- which.max(density(d$TLT)$y)
# max_density <- stats::density(d$TLT)$y[max]
# max_y <- max_density
x_data <- max(d$TLT)
hist(TLT, prob=TRUE, breaks= x_data,
main=paste(d$DLT_Code[1],
'- best :',
names(Loglike[Loglike_Best])),
sub = 'Total Lead Times',
col='lightgrey',
border='white'
# ylim= c(0,max_y)
)
lines(density(TLT),
col='darkgrey',
lty=2,
lwd=2)
grid(nx = NA, ny = NULL, col = "gray", lty = "dotted",
lwd = .5, equilogs = TRUE)
curve(dnorm(x,
mean=fit[['norm']]$estimate[1],
sd=fit[['norm']]$estimate[2]),
add=TRUE, col='blue', lwd=2)
curve(dlnorm(x,
meanlog=fit[['lnorm']]$estimate[1],
sdlog=fit[['lnorm']]$estimate[2]),
add=TRUE, col='darkgreen', lwd=2)
curve(dweibull(x,
shape=fit[['weibull']]$estimate[1],
scale=fit[['weibull']]$estimate[2]),
add=TRUE, col='purple', lwd=2)
curve(dgamma(x,
shape=fit[['gamma']]$estimate[1],
rate=fit[['gamma']]$estimate[2]),
add=TRUE, col='Gold', lwd=2)
curve(dgev(x,
loc=fit[['gev']]$estimate[1],
scale=fit[['gev']]$estimate[2],
shape=fit[['gev']]$estimate[3]),
add=TRUE, col='red', lwd=2)
legend_loglik <- paste(c('Norm', 'LogNorm', 'Weibull', 'Gamma','GEV'), c(':'),
round(Loglike, digits=2))
legend("topright", legend=legend_loglik,
col=c('blue', 'darkgreen', 'purple', 'gold', 'red'),
lty=1, lwd=2,
bty='o', bg='white', box.lty=2, box.lwd = 1, box.col='white')
return(data.frame(DLT_Code = d$DLT_Code[1],
n = length(d$TLT),
Best = names(Loglike[Loglike_Best]),
norm = Loglike[1], # Loglike follows the order of distr: norm, lnorm, weibull, gamma, gev
lnorm = Loglike[2],
weibull = Loglike[3],
gamma = Loglike[4],
GEV = Loglike[5]))
}
# Creating data set
TLT <- c(rep(0,32), rep(1,120), rep(2,10), rep(3,67), rep(4,14), rep(5,7), 6)
DLT_Code <- c(rep('DLT_Code',251))
data_to_plot <- data.frame(cbind(DLT_Code,TLT))
data_to_plot$TLT <- as.numeric(as.character(data_to_plot$TLT ))
DLT_Distr <- do.call(rbind, by(data = data_to_plot, INDICES = data_to_plot$DLT_Code, FUN=fdistr))
I was trying to play with max_y and then use it in ylim. I could do it only for the normal density, but not for the rest of the curves.
Currently the plot looks like this (some curves are cut):
If I set ylim = c(0, 2), we can see that the lognormal and gamma densities go beyond 1:
I need to know the max value of each curve so that, when I choose which curve will be printed, I can set the correct ylim.

You could use purrr::map_dbl to map optimize over your densities, if you rearrange your code slightly and have an idea of the input range over which to search for the maxima (i.e., where the density exists).
You can parameterize your densities ahead of time; that way you can find their peak values using optimize and also pass them to the curve function.
As a small reproducible example:
library(purrr)
# parameterize your densities
mynorm <- function(x) dnorm(x, mean = 0, sd = 1)
mygamma <- function(x) dgamma(x, rate = .5, shape = 1)
# get largest maximum over interval
ymax <- max(purrr::map_dbl(c(mynorm, mygamma), ~ optimize(., interval = c(0, 3), maximum = T)$objective))
# 0.4999811
# plot data
curve(mynorm, col = "blue", lwd = 2, xlim = c(0, 3), ylim = c(0, ymax * 1.1))
curve(mygamma, col = "red", lwd = 2, add = T)
Using your code, I've implemented the above solution and adjusted the x grid of the curve function, as discussed in the comments, to make things clearer and show what you should actually be plotting:
library(plyr)
library(dplyr)
library(fitdistrplus)
library(evd)
library(gamlss)
library(purrr) # <- add this library
fdistr <- function(d) {
# Uncomment to try run line by line
# d <- data_to_plot
TLT <- d$TLT
if (any(TLT <= 0)) {TLT[TLT <= 0] <- 0.001} # replace values <= 0 for log calculation
gev <- fgev(TLT, std.err=FALSE)
distr <- c('norm', 'lnorm', 'weibull', 'gamma')
fit <- lapply(X=distr, FUN=fitdist, data=TLT)
fit[[5]] <- gev
distr[5] <- 'gev'
names(fit) <- distr
Loglike <- sapply(X=fit, FUN=logLik)
Loglike_Best <- which(Loglike == max(Loglike))
# Uncomment to try run line by line
# max <- which.max(density(d$TLT)$y)
# max_density <- stats::density(d$TLT)$y[max]
# max_y <- max_density
x_data <- max(d$TLT)
# parameterize your densities before plotting
mynorm <- function(x) {
dnorm(x,
mean=fit[['norm']]$estimate[1],
sd=fit[['norm']]$estimate[2])
}
mylnorm <- function(x){
dlnorm(x,
meanlog=fit[['lnorm']]$estimate[1],
sdlog=fit[['lnorm']]$estimate[2])
}
myweibull <- function(x) {
dweibull(x,
shape=fit[['weibull']]$estimate[1],
scale=fit[['weibull']]$estimate[2])
}
mygamma <- function(x) {
dgamma(x,
shape=fit[['gamma']]$estimate[1],
rate=fit[['gamma']]$estimate[2])
}
mygev <- function(x){
dgev(x,
loc=fit[['gev']]$estimate[1],
scale=fit[['gev']]$estimate[2],
shape=fit[['gev']]$estimate[3])
}
distributions <- c(mynorm, mylnorm, myweibull, mygamma, mygev)
# get the max of each density
y <- purrr::map_dbl(distributions, ~ optimize(., interval = c(0, x_data), maximum = T)$objective)
# find the max (excluding infinity)
ymax <- max(y[abs(y) < Inf])
hist(TLT, prob=TRUE, breaks= x_data,
main=paste(d$DLT_Code[1],
'- best :',
names(Loglike[Loglike_Best])),
sub = 'Total Lead Times',
col='lightgrey',
border='white',
ylim= c(0, ymax)
)
lines(density(TLT),
col='darkgrey',
lty=2,
lwd=2)
grid(nx = NA, ny = NULL, col = "gray", lty = "dotted",
lwd = .5, equilogs = TRUE)
curve(mynorm,
add=TRUE, col='blue', lwd=2, n = 1E5) # <- increase x grid
curve(mylnorm,
add=TRUE, col='darkgreen', lwd=2, n = 1E5) # <- increase x grid
curve(myweibull,
add=TRUE, col='purple', lwd=2, n = 1E5) # <- increase x grid
curve(mygamma,
add=TRUE, col='Gold', lwd=2, n = 1E5) # <- increase x grid
curve(mygev,
add=TRUE, col='red', lwd=2, n = 1E5) # <- increase x grid
legend_loglik <- paste(c('Norm', 'LogNorm', 'Weibull', 'Gamma','GEV'), c(':'),
round(Loglike, digits=2))
legend("topright", legend=legend_loglik,
col=c('blue', 'darkgreen', 'purple', 'gold', 'red'),
lty=1, lwd=2,
bty='o', bg='white', box.lty=2, box.lwd = 1, box.col='white')
return(data.frame(DLT_Code = d$DLT_Code[1],
n = length(d$TLT),
Best = names(Loglike[Loglike_Best]),
norm = Loglike[1], # Loglike follows the order of distr: norm, lnorm, weibull, gamma, gev
lnorm = Loglike[2],
weibull = Loglike[3],
gamma = Loglike[4],
GEV = Loglike[5]))
}
# Creating data set
TLT <- c(rep(0,32), rep(1,120), rep(2,10), rep(3,67), rep(4,14), rep(5,7), 6)
DLT_Code <- c(rep('DLT_Code',251))
data_to_plot <- data.frame(cbind(DLT_Code,TLT))
data_to_plot$TLT <- as.numeric(as.character(data_to_plot$TLT ))
DLT_Distr <- do.call(rbind, by(data = data_to_plot, INDICES = data_to_plot$DLT_Code, FUN=fdistr))
Why your plot height isn't matching the solution output
To illustrate further what's going on with your plot, and to clear up some of the confusion, you need to understand how the curve function plots your data. By default, curve takes 101 x-values, evaluates your function to get the corresponding y-values, and then plots those points as a line. Because the peaks on some of your densities are so sharp, the curve function isn't evaluating enough x-values to capture the density peaks. To show you what I mean, I will focus on your gamma density. Don't worry about the code so much as the output. Below are the first few (x, y) coordinates for different values of n.
library(purrr)
mygamma <- function(x) {
dgamma(x,
shape=fit[['gamma']]$estimate[1], # 0.6225622
rate=fit[['gamma']]$estimate[2]) # 0.3568242
}
number_of_x <- c(5, 10, 101, 75000)
purrr::imap_dfr(number_of_x, ~ curve(mygamma, xlim = c(0, 6), n = .), .id = "n") %>%
dplyr::mutate_at(1, ~ sprintf("n = %i", number_of_x[as.numeric(.)])) %>%
dplyr::mutate(n = factor(n, unique(n))) %>%
dplyr::filter(x > 0) %>%
dplyr::group_by(n) %>%
dplyr::slice_min(order_by = x, n = 5)
n x y
<fct> <dbl> <dbl>
1 n = 5 1.5 0.184
2 n = 5 3 0.0828
3 n = 5 4.5 0.0416
4 n = 5 6 0.0219
5 n = 10 0.667 0.336
6 n = 10 1.33 0.204
7 n = 10 2 0.138
8 n = 10 2.67 0.0975
9 n = 10 3.33 0.0707
10 n = 101 0.06 1.04
11 n = 101 0.12 0.780
12 n = 101 0.18 0.655
13 n = 101 0.24 0.575
14 n = 101 0.3 0.518
15 n = 75000 0.0000800 12.9
16 n = 75000 0.000160 9.90
17 n = 75000 0.000240 8.50
18 n = 75000 0.000320 7.62
19 n = 75000 0.000400 7.01
Notice that when n = 5 very few values are plotted. As n increases, the distance between the x-values gets smaller. Since these functions are continuous, there is an infinite number of points to plot, but that cannot be done computationally, so a subset of x-values is plotted as an approximation. The more x-values, the better the approximation. Normally the default n = 101 works fine, but because the gamma and log-normal densities have such sharp peaks, the plot function steps over the maximum value. Below is a full plot of the data for n = 5, 10, 101, 75000 with points added.

Finally, I used this solution, found here:
mygamma <- function(x) dgamma(x, shape=fit[['gamma']]$estimate[1],
rate=fit[['gamma']]$estimate[2])
get_curve_values <- function(fn, x_data){
res <- curve(fn, from=0, to=x_data) # curve() invisibly returns the plotted (x, y) values
dev.off() # discard the plot; we only want the values
res
}
curve_val <- get_curve_values(mygamma, x_data)
ylim <- max(curve_val$y,na.rm = TRUE)
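A device-free alternative (a sketch, reusing the mygamma and x_data defined above): evaluate the density directly on a fine grid instead of drawing and discarding a plot:
xs <- seq(0, x_data, length.out = 1e5) # fine grid over the plotting range
ylim_max <- max(mygamma(xs), na.rm = TRUE) # peak of the curve on that grid
This avoids touching the graphics device at all, at the cost of choosing the grid resolution yourself.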

Related

R - sapply does not seem to apply function to each element of a vector

I am trying to run a simulation to illustrate the unbiasedness of the OLS slope in simple linear regression when Y is affected by different levels of error variance. The following code seems to use only the first element of sd <- c(1, 10). I guess something is wrong with how sd is used within the sapply() function, but I can't find the actual error.
set.seed(6578)
n <- 200 # sample size
n_samples <- 5000 # Number of samples drawn
sd <- c(1, 10) # Varying levels of error
x <- rnorm(n) # Covariate values
regressions <- function(x, sd) {
z <- .4 * x + rnorm(n, 0, sd)
fit_lm <- lm(z ~ x)
slopes <- fit_lm$coefficients[2]
return(slopes)
}
sample_slopes_varying_error <- sapply(sd, function(f) replicate(n_samples, regressions(x, sd)))
plot(density(sample_slopes_varying_error[, 1]), col = "red", main = "", xlab = "Slope estimates")
lines(density(sample_slopes_varying_error[, 2]), col = "blue")
abline(v = mean(sample_slopes_varying_error[, 1]), lty = "dashed", col = "red")
abline(v = mean(sample_slopes_varying_error[, 2]), lty = "dotted", col = "red")
apply(sample_slopes_varying_error, 2, sd) # Almost identical SDs.
#> [1] 0.5282542 0.5353999
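A likely fix (my reading; no accepted answer is quoted here): the anonymous function's argument f is never used, so every replicate() call receives the full vector sd. Passing f through gives each column its own error level:
sample_slopes_varying_error <- sapply(sd, function(f) replicate(n_samples, regressions(x, f)))
apply(sample_slopes_varying_error, 2, stats::sd) # the SDs should now differ by roughly a factor of 10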

Hist with lines in R

I generate 4 parts of big data: cluster1 (10000 points), cluster2 (15000 points), cluster3 (15000 points) and throws (500 points). Here is the code:
library('MASS')
library('fpc')
#library("dbscan")
library("factoextra")
library("clustertend")
library("boot")
library("stream")
set.seed(123)
mu1<-c(-5,-7)
mu1
sigma1<-matrix(c(4,-2,-2,2), nrow=2, ncol=2, byrow = TRUE)
sigma1
n<-10000
cluster1<-mvrnorm(n,mu1,sigma1)
cluster1
#cluster1<-as.data.frame(cluster1)
#cluster1
#c<-runif(10000,1,1000)
#c
phi <- runif(15000, max = 2*pi)
rho <- sqrt(runif(15000))
x <- sqrt(5)*rho*cos(phi) + 6
y <- sqrt(10/3)*rho*sin(phi) + 4
range(2*(x - 6)^2 + 3*(y - 4)^2)
#[1] 0.001536582 9.999425234
plot(x, y)
cluster2<-cbind(x,y)
cluster2
u <- runif(15000, max = 3)
v <- runif(15000, max = 2)
x <- u + v - 10
y <- v - u + 8
range(x + y)
#[1] -1.999774 1.999826
range(x - y + 15)
#[1] -2.999646 2.999692
plot(x, y)
cluster3<-cbind(x,y)
cluster3
#cluster3<-as.data.frame(cluster1)
#cluster3
x <- runif(500, -20, 20)
y <- runif(500, -20, 20)
#u <- runif(500, max = 20)
#v <- runif(500, max = 20)
#x <- u + v - 20
#y <- v - u
range(x)
range(y)
plot(x,y)
throws<-cbind(x,y)
throws
data<-rbind(cluster1,cluster2,cluster3,throws)
data<-as.data.frame(data)
data
plot(data)
Then, using the bootstrap method, I try to construct a distribution of the Hopkins statistic H for some fixed m, which is 7% of the total number of generated points (m = 2835). Here is the code where I do this:
B<-10#number of iterations
H<-NULL#value of Hopkins statistic
for(i in 1:B){
N<-dim(data)[1]
s<-sample(N,0.8*N)
stat<-hopkins(data[s,], n=2835, byrow = TRUE)$H
H[i]<-stat
#print(c(i, stat))
}
It takes a very long time to generate. Then I need to compare this result with the beta distribution B(m, m). Here is the code:
hist(H)
#(density(H), col="red")
#hist(distB)
X<-seq(min(H), max(H), 0.001)
X
lines(X, dbeta(X,2835,2835), type="l", col="red")
The problem is that the line doesn't draw on the histogram. Can anybody say what the problem is? Here is the image; I see the red line, but it's not exactly right.
Your y-axis values plotted by dbeta() are way too low to register on the supplied y-axis (<0.0000001). You need to overlay the second plot:
# sample data
H <- sample(seq(0.455,0.475,0.001), 1000, replace = TRUE)
#plot histogram
hist(H)
# prepare graphics to add second plot
par(new = TRUE)
# sample data for second plot
X <- seq(0.455,0.475, 0.001)
Y <- dbeta(X,2835,2835)
# plot second plot, remove axes
plot(X, dbeta(X,2835,2835), type="l", col="red", axes = FALSE)
axis(4, Y) # add axis on right side
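An alternative sketch, assuming the goal is to draw the curve on the histogram's own count axis: scale the density by the sample size times the bin width (taken from the object hist() returns). Note that for B(2835, 2835) the density is essentially zero over this range of H, so the line will hug the x-axis; that is exactly why the second-axis overlay above is needed to see its shape:
h <- hist(H) # keep the return value for its bin breaks
lines(X, dbeta(X, 2835, 2835) * length(H) * diff(h$breaks)[1], col = "red")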

Plotting the CDF and Quantile Functions Given the PDF

How would I plot the CDF and quantile functions, in R, if I have the PDF? Currently, I have the following (but I think there must be a better way to do it):
## Probability Density Function
p <- function(x) {
result <- (x^2)/9
result[x < 0 | x > 3] <- 0
result
}
plot(p, xlim = c(0,3), main="Probability Density Function")
## Cumulative Distribution Function
F <- function(a = 0,b){
result <- ((b^3)/27) - ((a^3)/27)
result[a < 0 ] <- 0
result[b > 3] <- 1
result
}
plot(function(x) F(0, x), xlim=c(0,3), main="Cumulative Distribution Function")
## Quantile Function
Finv <- function(p) {
3*p^(1/3)
}
As @dash2 suggested, the CDF requires you to integrate the PDF, in essence finding the area under the curve.
Here's a generic solution which should help. I am using a Gaussian distribution as an example; you should be able to feed it any generic function.
Note that the quantiles reported are approximations only. Also, don't forget to look into the documentation for integrate().
# CDF Function
CDF <- function(FUNC = p, plot = T, area = 0.5, LOWER = -10, UPPER = 10, SIZE = 1000){
# Create data
x <- seq(LOWER, UPPER, length.out = SIZE)
y <- FUNC(x) # use the supplied function, not the global p
area.vec <- c()
area.vec[1] <- 0
for(i in 2:length(x)){
area.vec[i] <- integrate(FUNC, lower = x[1], upper = x[i])$value
}
# Quantile
quantile = x[which.min(abs(area.vec - area))]
# Plot if requested
if(plot == TRUE){
# PDF
par(mfrow = c(1, 2))
plot(x, y, type = "l", main = "PDF", col = "indianred", lwd = 2)
grid()
# CDF
plot(x, area.vec, type = "l", main = "CDF", col = "slateblue",
xlab = "X", ylab = "CDF", lwd = 2)
# Quantile
mtext(text = paste("Quantile at ", area, "=",
round(quantile, 3)), side = 3)
grid()
par(mfrow = c(1, 1))
}
invisible(quantile) # return the approximate quantile as well
}
# Sample data
# PDF Function - Gaussian distribution
p <- function(x, SD = 1, MU = 0){
y <- (1/(SD * sqrt(2*pi)) * exp(-0.5 * ((x - MU)/SD) ^ 2))
return(y)
}
# Call to function
CDF(p, area = 0.5, LOWER = -5, UPPER = 5)
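For the specific PDF in the original question (supported on [0, 3]), a more direct sketch is to build the CDF with integrate() and invert it with uniroot(); cdf and qf are hypothetical names of my own, and the question's original p must be back in scope (it was redefined to a Gaussian just above):
cdf <- function(q) integrate(p, lower = 0, upper = q)$value # F(q) = q^3/27 here
qf <- function(prob) uniroot(function(q) cdf(q) - prob, interval = c(0, 3))$root
cdf(1.5) # 0.125
qf(0.5) # median, about 2.381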

Univariate adaptive kernel density estimation in R

Is there an R function which can calculate an adaptive kernel density function for univariate observations. What about akj (package quantreg)? Thanks.
I do not know about the package, but it is quite simple to implement it yourself (this will also make you understand exactly what you are doing). For example, let's take these values in the plane:
g = 5
n = 100
set.seed(g)
df = data.frame(x = unlist(lapply(1:g, function(i) rnorm(n/g, runif(1)*i))),
y= unlist(lapply(1:g, function(i) rnorm(n/g, runif(1)*i))))
plot(df)
Let's assume you want to estimate the density at three points, x1 = c(6, -1), x2 = c(0.3, 2), x3 = c(3, -0.5), of this distribution. The density should be low at x1, high at x2, and the density at x3 should fall between these two:
points(6,-1, col='red', pch=19)
points(0.3,2, col='blue', pch=19)
points(3,-0.5, col='green', pch=19)
According to the definition of an adaptive kernel density function:
http://en.wikipedia.org/wiki/Variable_kernel_density_estimation
library(functional)
gaussianKernel = function(u, h) exp(-sum(u^2)/(2*h^2))
densityFunction = function(x, df, ker, h)
{
difference = t(t(df) - x) # offset of each observation from the evaluation point x
W = sum(apply(difference, 1, ker, h=h)) # total kernel weight at x
W/(nrow(df)*(h^(length(df)))) # normalize by n * h^d
}
myDensityFunction = Curry(densityFunction, df=df, ker=gaussianKernel , h=2)
And we have confirmation of the intuitive result: 0 <= P(x1) < P(x3) < P(x2) <= 1
#> myDensityFunction(x1)
#[1] 0.02140895
#> myDensityFunction(x2)
#[1] 0.1146402
#> myDensityFunction(x3)
#[1] 0.09341908
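On the akj suggestion in the question: a minimal univariate sketch, assuming the quantreg package is installed and leaving the tuning parameters at their defaults (akj expects the kernel centers sorted):
library(quantreg)
x <- sort(rnorm(100))
z <- seq(-3, 3, length.out = 200) # grid on which to evaluate the density
fit <- akj(x, z) # adaptive kernel density estimate
plot(z, fit$dens, type = 'l')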

how to find 95% confidence bands for predicting mean y per value of x and 95% prediction bands for predicting individual y values [duplicate]

If I have 10 values, each of which has a fitted value F, and an upper and lower confidence interval U and L:
set.seed(0815)
F <- runif(10, 1, 2)
L <- runif(10, 0, 1)
U <- runif(10, 2, 3)
How can I show these 10 fitted values and their confidence intervals in the same plot, like the one below, in R?
Here is a plotrix solution:
set.seed(0815)
x <- 1:10
F <- runif(10,1,2)
L <- runif(10,0,1)
U <- runif(10,2,3)
require(plotrix)
plotCI(x, F, ui=U, li=L)
And here is a ggplot solution:
set.seed(0815)
df <- data.frame(x =1:10,
F =runif(10,1,2),
L =runif(10,0,1),
U =runif(10,2,3))
require(ggplot2)
ggplot(df, aes(x = x, y = F)) +
geom_point(size = 4) +
geom_errorbar(aes(ymax = U, ymin = L))
UPDATE:
Here is a base solution to your edits:
set.seed(1234)
x <- rnorm(20)
df <- data.frame(x = x,
y = x + rnorm(20))
plot(y ~ x, data = df)
# model
mod <- lm(y ~ x, data = df)
# predicts + interval
newx <- seq(min(df$x), max(df$x), length.out=100)
preds <- predict(mod, newdata = data.frame(x=newx),
interval = 'confidence')
# plot
plot(y ~ x, data = df, type = 'n')
# add fill
polygon(c(rev(newx), newx), c(rev(preds[ ,3]), preds[ ,2]), col = 'grey80', border = NA)
# model
abline(mod)
# intervals
lines(newx, preds[ ,3], lty = 'dashed', col = 'red')
lines(newx, preds[ ,2], lty = 'dashed', col = 'red')
Here is a solution using functions plot(), polygon() and lines().
set.seed(1234)
df <- data.frame(x =1:10,
F =runif(10,1,2),
L =runif(10,0,1),
U =runif(10,2,3))
plot(df$x, df$F, ylim = c(0,4), type = "l")
#make polygon where coordinates start with lower limit and
# then upper limit in reverse order
polygon(c(df$x,rev(df$x)),c(df$L,rev(df$U)),col = "grey75", border = FALSE)
lines(df$x, df$F, lwd = 2)
#add red lines on borders of polygon
lines(df$x, df$U, col="red",lty=2)
lines(df$x, df$L, col="red",lty=2)
Now use example data provided by OP in another question:
Lower <- c(0.418116841, 0.391011834, 0.393297710,
0.366144073,0.569956636,0.224775521,0.599166016,0.512269587,
0.531378573, 0.311448219, 0.392045751,0.153614913, 0.366684097,
0.161100849,0.700274810,0.629714150, 0.661641288, 0.533404093,
0.412427559, 0.432905333, 0.525306427,0.224292061,
0.28893064,0.099543648, 0.342995605,0.086973739,0.289030388,
0.081230826,0.164505624, -0.031290586,0.148383474,0.070517523,0.009686605,
-0.052703529,0.475924192,0.253382210, 0.354011010,0.130295355,0.102253218,
0.446598823,0.548330752,0.393985810,0.481691632,0.111811248,0.339626541,
0.267831909,0.133460254,0.347996621,0.412472322,0.133671128,0.178969601,0.484070587,
0.335833224,0.037258467, 0.141312363,0.361392799,0.129791998,
0.283759439,0.333893418,0.569533076,0.385258093,0.356201955,0.481816148,
0.531282473,0.273126565,0.267815691,0.138127486,0.008865700,0.018118398,0.080143484,
0.117861634,0.073697418,0.230002398,0.105855042,0.262367348,0.217799352,0.289108011,
0.161271889,0.219663224,0.306117717,0.538088622,0.320711912,0.264395149,0.396061543,
0.397350946,0.151726970,0.048650180,0.131914718,0.076629840,0.425849394,
0.068692279,0.155144797,0.137939059,0.301912657,-0.071415593,-0.030141781,0.119450922,
0.312927614,0.231345972)
Upper.limit <- c(0.6446223,0.6177311, 0.6034427, 0.5726503,
0.7644718, 0.4585430, 0.8205418, 0.7154043,0.7370033,
0.5285199, 0.5973728, 0.3764209, 0.5818298,
0.3960867,0.8972357, 0.8370151, 0.8359921, 0.7449118,
0.6152879, 0.6200704, 0.7041068, 0.4541011, 0.5222653,
0.3472364, 0.5956551, 0.3068065, 0.5112895, 0.3081448,
0.3745473, 0.1931089, 0.3890704, 0.3031025, 0.2472591,
0.1976092, 0.6906118, 0.4736644, 0.5770463, 0.3528607,
0.3307651, 0.6681629, 0.7476231, 0.5959025, 0.7128883,
0.3451623, 0.5609742, 0.4739216, 0.3694883, 0.5609220,
0.6343219, 0.3647751, 0.4247147, 0.6996334, 0.5562876,
0.2586490, 0.3750040, 0.5922248, 0.3626322, 0.5243285,
0.5548211, 0.7409648, 0.5820070, 0.5530232, 0.6863703,
0.7206998, 0.4952387, 0.4993264, 0.3527727, 0.2203694,
0.2583149, 0.3035342, 0.3462009, 0.3003602, 0.4506054,
0.3359478, 0.4834151, 0.4391330, 0.5273411, 0.3947622,
0.4133769, 0.5288060, 0.7492071, 0.5381701, 0.4825456,
0.6121942, 0.6192227, 0.3784870, 0.2574025, 0.3704140,
0.2945623, 0.6532694, 0.2697202, 0.3652230, 0.3696383,
0.5268808, 0.1545602, 0.2221450, 0.3553377, 0.5204076,
0.3550094)
Fitted.values<- c(0.53136955, 0.50437146, 0.49837019,
0.46939721, 0.66721423, 0.34165926, 0.70985388, 0.61383696,
0.63419092, 0.41998407, 0.49470927, 0.26501789, 0.47425695,
0.27859380, 0.79875525, 0.73336461, 0.74881668, 0.63915795,
0.51385774, 0.52648789, 0.61470661, 0.33919656, 0.40559797,
0.22339000, 0.46932536, 0.19689011, 0.40015996, 0.19468781,
0.26952645, 0.08090917, 0.26872696, 0.18680999, 0.12847285,
0.07245286, 0.58326799, 0.36352329, 0.46552867, 0.24157804,
0.21650915, 0.55738088, 0.64797691, 0.49494416, 0.59728999,
0.22848680, 0.45030036, 0.37087676, 0.25147426, 0.45445930,
0.52339711, 0.24922310, 0.30184215, 0.59185198, 0.44606040,
0.14795374, 0.25815819, 0.47680880, 0.24621212, 0.40404398,
0.44435727, 0.65524894, 0.48363255, 0.45461258, 0.58409323,
0.62599114, 0.38418264, 0.38357103, 0.24545011, 0.11461756,
0.13821664, 0.19183886, 0.23203127, 0.18702881, 0.34030391,
0.22090140, 0.37289121, 0.32846615, 0.40822456, 0.27801706,
0.31652008, 0.41746184, 0.64364785, 0.42944100, 0.37347037,
0.50412786, 0.50828681, 0.26510696, 0.15302635, 0.25116438,
0.18559609, 0.53955941, 0.16920626, 0.26018389, 0.25378867,
0.41439675, 0.04157232, 0.09600163, 0.23739430, 0.41666762,
0.29317767)
Assemble into a data frame (no x provided, so using indices)
df2 <- data.frame(x=seq(length(Fitted.values)),
fit=Fitted.values,lwr=Lower,upr=Upper.limit)
plot(fit~x,data=df2,ylim=range(c(df2$lwr,df2$upr)))
#make polygon where coordinates start with lower limit and then upper limit in reverse order
with(df2,polygon(c(x,rev(x)),c(lwr,rev(upr)),col = "grey75", border = FALSE))
matlines(df2[,1],df2[,-1],
lwd=c(2,1,1),
lty=1,
col=c("black","red","red"))
Here is part of my program related to plotting confidence intervals.
1. Generate the test data
require(stats); require(graphics)
library(splines)
x_raw <- seq(1, 10, 0.1)
len_data <- length(x_raw) # define the length before it is used below
N <- len_data
y <- cos(x_raw) + rnorm(len_data, 0, 0.1)
y[30] <- 1.4 # outlier point
summary(fm1 <- lm(y ~ bs(x_raw, df = 5), model = TRUE, x = T, y = T))
ht <- seq(1, 10, length.out = len_data)
plot(x = x_raw, y = y, type = 'p')
y_e <- predict(fm1, data.frame(x_raw = ht)) # newdata must use the model's variable name
lines(x = ht, y = y_e)
Result
2. Fitting the raw data using the B-spline smoother method
sigma_e <- sqrt(sum((y - y_e)^2)/N) # residual standard deviation
print(sigma_e)
H <- fm1$x # design matrix of the fit
A <- solve(t(H) %*% H)
y_e_minus <- rep(0, N)
y_e_plus <- rep(0, N)
for (i in 1:N)
{
# pointwise 95% band: +/- 1.96 * sigma_e * sqrt(h_ii), with h_ii from the hat matrix
tmp <- t(matrix(H[i,])) %*% A %*% matrix(H[i,])
tmp <- 1.96 * sigma_e * sqrt(tmp)
y_e_minus[i] <- y_e[i] - tmp
y_e_plus[i] <- y_e[i] + tmp
}
plot(x = x_raw, y = y,type = 'p')
polygon(c(ht,rev(ht)),c(y_e_minus,rev(y_e_plus)),col = rgb(1, 0, 0,0.5), border = NA)
#plot(x = x_raw, y = y,type = 'p')
lines(x= ht, y = y_e_plus, lty = 'dashed', col = 'red')
lines(x= ht, y = y_e)
lines(x= ht, y = y_e_minus, lty = 'dashed', col = 'red')
Result
An addition to the previous answers: it is nice to regulate the shading density of the polygon to avoid obscuring the data points.
library(MASS)
attach(Boston)
lm.fit2 = lm(medv~poly(lstat,2))
plot(lstat,medv)
new.lstat = seq(min(lstat), max(lstat), length.out=100)
preds <- predict(lm.fit2, newdata = data.frame(lstat=new.lstat), interval = 'prediction')
lines(sort(lstat), fitted(lm.fit2)[order(lstat)], col='red', lwd=3)
polygon(c(rev(new.lstat), new.lstat), c(rev(preds[ ,3]), preds[ ,2]), density=10, col = 'blue', border = NA)
lines(new.lstat, preds[ ,3], lty = 'dashed', col = 'red')
lines(new.lstat, preds[ ,2], lty = 'dashed', col = 'red')
Please note that the picture shows the prediction interval, which is several times wider than the confidence interval. You can read here the detailed explanation of those two types of interval estimates.
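A minimal sketch of that difference, reusing the Boston fit from above: compute both interval types and compare their widths (prediction bands add the irreducible error term, so they are wider):
conf <- predict(lm.fit2, newdata = data.frame(lstat = new.lstat), interval = 'confidence')
pred <- predict(lm.fit2, newdata = data.frame(lstat = new.lstat), interval = 'prediction')
mean(pred[, 3] - pred[, 2]) / mean(conf[, 3] - conf[, 2]) # width ratio, well above 1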
