Multiple least squares quadratic fit in ggplot (R)

Notice that your graphic from Problem 4 shows a quadratic, or curved,
relationship between log_wage and exp. The next task is to plot a
quadratic fit for each race level: "black", "white" and "other". To
estimate the quadratic fit, you can use the following function quad_fit:
```{r}
quad_fit <- function(data_sub) {
  return(lm(log_wage ~ exp + I(exp^2), data = data_sub)$coefficients)
}
quad_fit(salary_data)
```
The above function computes the least squares quadratic fit and
returns the coefficients a1, a2, a3 of the fitted curve
Y(hat) = a1 + a2*x + a3*x^2,
where Y(hat) = log(wage) and x = exp.
Use ggplot to accomplish this task or use base R graphics for
partial credit. Make sure to include a legend and appropriate labels.
My attempt
blackfit <- quad_fit(salary_data[salary_data$race == "black",])
whitefit <- quad_fit(salary_data[salary_data$race == "white",])
otherfit <- quad_fit(salary_data[salary_data$race == "other",])
yblack <- blackfit[1] + blackfit[2]*salary_data$exp + blackfit[3]*(salary_data$exp)^2
ywhite <- whitefit[1] + whitefit[2]*salary_data$exp + whitefit[3]*(salary_data$exp)^2
yother <- otherfit[1] + otherfit[2]*salary_data$exp + otherfit[3]*(salary_data$exp)^2
soloblack <- salary_data[salary_data$race == "black",]
solowhite <- salary_data[salary_data$race == "white",]
soloother <- salary_data[salary_data$race == "other",]
ggplot(data = soloblack) +
geom_point(aes(x = exp, y = log_wage)) +
stat_smooth(aes(y = log_wage, x = exp), formula = y ~ yblack)
This is only the first attempt, for the data filtered to race == "black".
I am not clear what the formula should look like, because quad_fit seems to do the calculation for you already.

Consider plotting fitted values using the output of quad_fit (as shown by @StefanK here) and use by() to plot across all distinct values of race:
reg_plot <- function(sub) {
  # PREDICTED DATA FOR LINE PLOT (quad_fit returns the three coefficients)
  coefs <- quad_fit(sub)
  predicted_df <- data.frame(
    exp = sub$exp,
    wage_pred = coefs[1] + coefs[2] * sub$exp + coefs[3] * sub$exp^2
  )
  # ORIGINAL SCATTER PLOT WITH PREDICTED LINE
  ggplot(data = sub) +
    geom_point(aes(x = exp, y = log_wage, alpha = exp)) +
    geom_line(data = predicted_df, aes(x = exp, y = wage_pred), color = "red") +
    labs(x = "Job Experience", y = "Log of Wage",
         title = paste("Wage and Job Experience Plot for",
                       sub$race[[1]], "in Salary Dataset"))
}
# RUN GRAPHS FOR EACH RACE
by(salary_data, salary_data$race, reg_plot)
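As a follow-up: if you would rather have all three quadratic curves in a single figure with a legend (which is what the assignment asks for), here is a minimal sketch that lets ggplot refit the same quadratic within each race group, assuming salary_data has the columns exp, log_wage and race:
ggplot(salary_data, aes(x = exp, y = log_wage, colour = race)) +
  geom_point(alpha = 0.3) +
  stat_smooth(method = "lm", formula = y ~ x + I(x^2), se = FALSE) +
  labs(x = "Job Experience", y = "Log of Wage", colour = "Race",
       title = "Quadratic fit of log wage on experience by race")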

Related

Plotting posterior parameter estimates from multiple models with bayesplot

I am using the great plotting library bayesplot to visualize posterior probability intervals from models I am estimating with rstanarm. I want to graphically compare draws from different models by getting the posterior intervals for coefficients onto the same plot.
Imagine, for instance, that I have 1000 draws from the posterior for three parameters beta1, beta2, beta3 for two different models:
# load the plotting library
library(bayesplot)
#> This is bayesplot version 1.6.0
#> - Online documentation and vignettes at mc-stan.org/bayesplot
#> - bayesplot theme set to bayesplot::theme_default()
#> * Does _not_ affect other ggplot2 plots
#> * See ?bayesplot_theme_set for details on theme setting
library(ggplot2)
# generate fake posterior draws from model1
fdata <- matrix(rnorm(1000 * 3), ncol = 3)
colnames(fdata) <- c('beta1', 'beta2', 'beta3')
# fake posterior draws from model 2
fdata2 <- matrix(rnorm(1000 * 3, 1, 2), ncol = 3)
colnames(fdata2) <- c('beta1', 'beta2', 'beta3')
Bayesplot makes fantastic visualizations for individual model draws, and it is ggplot2 'under the hood' so I can customize as I please:
# a nice plot of 1
color_scheme_set("orange")
mcmc_intervals(fdata) + theme_minimal() + ggtitle("Model 1")
# a nice plot of 2
color_scheme_set("blue")
mcmc_intervals(fdata2) + ggtitle("Model 2")
But what I would like to achieve is to plot these two models together on the same plot, such that for each coefficient I have two intervals and can distinguish which interval is which by mapping color to the model. However I can't figure out how to do this. Some things that don't work:
# doesn't work
mcmc_intervals(fdata) + mcmc_intervals(fdata2)
#> Error: Don't know how to add mcmc_intervals(fdata2) to a plot
# appears to pool
mcmc_intervals(list(fdata, fdata2))
Any ideas on how I could do this? Or how to do it manually given the matrices of posterior draws?
Created on 2018-10-18 by the reprex package (v0.2.1)
Just so the answer is also posted here, I have expanded on the code at the link from @Manny T (https://github.com/stan-dev/bayesplot/issues/232).
# simulate having posteriors for two different models each with parameters beta[1],..., beta[4]
posterior_1 <- matrix(rnorm(4000), 1000, 4)
posterior_2 <- matrix(rnorm(4000), 1000, 4)
colnames(posterior_1) <- colnames(posterior_2) <- paste0("beta[", 1:4, "]")
# use bayesplot::mcmc_intervals_data() function to get intervals data in format easy to pass to ggplot
library(bayesplot)
combined <- rbind(mcmc_intervals_data(posterior_1), mcmc_intervals_data(posterior_2))
combined$model <- rep(c("Model 1", "Model 2"), each = ncol(posterior_1))
# make the plot using ggplot
library(ggplot2)
theme_set(bayesplot::theme_default())
pos <- position_nudge(y = ifelse(combined$model == "Model 2", 0, 0.1))
ggplot(combined, aes(x = m, y = parameter, color = model)) +
geom_linerange(aes(xmin = l, xmax = h), position = pos, size=2)+
geom_linerange(aes(xmin = ll, xmax = hh), position = pos)+
geom_point(position = pos, color="black")
If you are like me, you will want 80% and 90% credible intervals (instead of 50% for the inner ones) and might want the coordinates flipped; let's also add a dashed line at 0 (no estimated change). You can do that like this:
# use bayesplot::mcmc_intervals_data() function to get intervals data in format easy to pass to ggplot
library(bayesplot)
combined <- rbind(mcmc_intervals_data(posterior_1,prob=0.8,prob_outer = 0.9), mcmc_intervals_data(posterior_2,prob=0.8,prob_outer = 0.9))
combined$model <- rep(c("Model 1", "Model 2"), each = ncol(posterior_1))
# make the plot using ggplot
library(ggplot2)
theme_set(bayesplot::theme_default())
pos <- position_nudge(y = ifelse(combined$model == "Model 2", 0, 0.1))
ggplot(combined, aes(x = m, y = parameter, color = model)) +
geom_linerange(aes(xmin = l, xmax = h), position = pos, size=2)+
geom_linerange(aes(xmin = ll, xmax = hh), position = pos)+
geom_point(position = pos, color="black")+
coord_flip()+
geom_vline(xintercept=0,linetype="dashed")
A few things to note on this last one. I added prob_outer = 0.9 even though that is the default, just to show how you might change the outer credible intervals. The dashed line is created with geom_vline and xintercept = here instead of geom_hline and yintercept = because of the coord_flip (everything is reversed). So if you don't flip axes, you will need to do the opposite.
I asked this question on the bayesplot page on GitHub and got a response (Issue #232).
I blew more time than I'd like to admit writing this, so might as well post it here. Here's a function that incorporates the suggestions from above that (for the moment) works for rstanarm and brms model objects.
compare_posteriors <- function(..., dodge_width = 0.5) {
dots <- rlang::dots_list(..., .named = TRUE)
draws <- lapply(dots, function(x) {
if (class(x)[1] == "stanreg") {
posterior::subset_draws(posterior::as_draws(x$stanfit),
variable = names(fixef(x))
)
} else if (class(x)[1] == "brmsfit") {
brm_draws <- posterior::subset_draws(posterior::as_draws(x$fit),
variable = paste0("b_", rownames(fixef(x)))
)
posterior::variables(brm_draws) <- stringr::str_split(posterior::variables(brm_draws), "_", simplify = T)[, 2]
posterior::rename_variables(brm_draws, `(Intercept)` = Intercept)
} else {
stop(paste0(class(x)[1], " objects not supported."))
}
})
intervals <- lapply(draws, bayesplot::mcmc_intervals_data)
combined <- dplyr::bind_rows(intervals, .id = "model")
ggplot(combined, aes(x = m, y = parameter, color = model, group = model)) +
geom_linerange(aes(xmin = l, xmax = h), size = 2, position = position_dodge(dodge_width)) +
geom_linerange(aes(xmin = ll, xmax = hh), position = position_dodge(dodge_width)) +
geom_point(color = "black", position = position_dodge(dodge_width)) +
geom_vline(xintercept = 0, linetype = "dashed")
}
Usage:
compare_posteriors(mod1, mod2, mod3)
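For example, with two hypothetical rstanarm fits on mtcars (the model formulas here are placeholders; any stanreg or brmsfit objects with fixed effects should work):
library(rstanarm)
library(ggplot2)
# two toy models purely for illustration
mod1 <- stan_glm(mpg ~ wt + hp, data = mtcars, refresh = 0)
mod2 <- stan_glm(mpg ~ wt + hp + am, data = mtcars, refresh = 0)
compare_posteriors(mod1, mod2)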

Plot one data frame column against all other columns using ggplots and showing densities in R

I have a data frame with 20 columns, and I want to plot one specific column (called BB) against each single column in the data frame. The plots I need are probability density plots, and I’m using the following code to generate one plot (plotting columns BB vs. AA as an example):
mydata = as.data.frame(fread("filename.txt")) #read my data as data frame
#function to calculate density
get_density <- function(x, y, n = 100) {
dens <- MASS::kde2d(x = x, y = y, n = n)
ix <- findInterval(x, dens$x)
iy <- findInterval(y, dens$y)
ii <- cbind(ix, iy)
return(dens$z[ii])
}
set.seed(1)
#define the x and y of the plot; x = column called AA; y = column called BB
xy1 <- data.frame(
x = mydata$AA,
y = mydata$BB
)
#call function get_density to calculate density for the defined x an y
xy1$density <- get_density(xy1$x, xy1$y)
#Plot
ggplot(xy1) + geom_point(aes(x, y, color = density), size = 3, pch = 20) + scale_color_viridis() +
labs(title = "BB vs. AA") +
scale_x_continuous(name="AA") +
scale_y_continuous(name="BB")
I would appreciate it if someone could suggest a method to produce multiple plots of BB against every other column, using the above density function and ggplot call. I tried adding a loop, but found it too complicated, especially when defining the x and y to be plotted or calling the density function.
Since you don't provide sample data, I'll demo on mtcars. We convert the data to long format, calculate the densities, and make a faceted plot. We plot the mpg column against all others.
library(dplyr)
library(tidyr)
library(ggplot2)
mtlong <- gather(mtcars, key = "var", value = "value", -mpg) %>%
  group_by(var) %>%
  mutate(density = get_density(value, mpg))
ggplot(mtlong, aes(x = value, y = mpg, color = density)) +
  geom_point(pch = 20, size = 3) +
  labs(x = "") +
  facet_wrap(~ var, scales = "free")
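As a side note, gather() is superseded in current tidyr; the same reshaping can be done with pivot_longer() (a sketch, keeping the column names used above):
mtlong <- pivot_longer(mtcars, cols = -mpg, names_to = "var", values_to = "value") %>%
  group_by(var) %>%
  mutate(density = get_density(value, mpg))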

Having several fits in one plot (in R)

I was wondering how I can modify the following code to show several quantile regression fits together in one plot.
data(airquality)
library(quantreg)
library(ggplot2)
library(data.table)
library(devtools)
# source Quantile LOESS
source("https://www.r-statistics.com/wp-content/uploads/2010/04/Quantile.loess_.r.txt")
airquality2 <- na.omit(airquality[ , c(1, 4)])
#'' quantreg::rq
rq_fit <- rq(Ozone ~ Temp, 0.95, airquality2)
rq_fit_df <- data.table(t(coef(rq_fit)))
names(rq_fit_df) <- c("intercept", "slope")
#'' quantreg::lprq
lprq_fit <- lapply(1:3, function(bw){
fit <- lprq(airquality2$Temp, airquality2$Ozone, h = bw, tau = 0.95)
return(data.table(x = fit$xx, y = fit$fv, bw = paste0("bw=", bw), fit = "quantreg::lprq"))
})
#'' Quantile LOESS
ql_fit <- Quantile.loess(airquality2$Ozone, jitter(airquality2$Temp), window.size = 10,
the.quant = .95, window.alignment = c("center"))
ql_fit_df <- data.table(x = ql_fit$x, y = ql_fit$y.loess, bw = "bw=1", fit = "Quantile LOESS")
I want to have all these fits in a plot.
geom_quantile can calculate quantiles using the rq method internally, so we don't need to create the rq_fit_df separately. However, the lprq and Quantile LOESS methods aren't available within geom_quantile, so I've used the data frames you provided and plotted them using geom_line.
In addition, to include the rq line in the color and linetype mappings and in the legend we add aes(colour="rq", linetype="rq") as a sort of "artificial" mapping inside geom_quantile.
library(dplyr) # For bind_rows()
ggplot(airquality2, aes(Temp, Ozone)) +
geom_point() +
geom_quantile(quantiles=0.95, formula=y ~ x, aes(colour="rq", linetype="rq")) +
geom_line(data=bind_rows(lprq_fit, ql_fit_df),
aes(x, y, colour=paste0(gsub("q.*:","",fit),": ", bw),
linetype=paste0(gsub("q.*:","",fit),": ", bw))) +
theme_bw() +
scale_linetype_manual(values=c(2,4,5,1,1)) +
labs(colour="Method", linetype="Method",
title="Different methods of estimating the 95th percentile by quantile regression")
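Alternatively, if you prefer to reuse the rq_fit_df you already computed instead of letting geom_quantile refit, the same line can be drawn explicitly with geom_abline; it then needs its own colour/linetype mapping to appear in the legend (a sketch, reusing the objects defined above):
ggplot(airquality2, aes(Temp, Ozone)) +
  geom_point() +
  geom_abline(data = rq_fit_df,
              aes(intercept = intercept, slope = slope, colour = "rq", linetype = "rq")) +
  geom_line(data = bind_rows(lprq_fit, ql_fit_df),
            aes(x, y, colour = paste0(gsub("q.*:", "", fit), ": ", bw),
                linetype = paste0(gsub("q.*:", "", fit), ": ", bw))) +
  labs(colour = "Method", linetype = "Method")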

How to plot the intersection of a hyperplane and a plane in R

I have a set of (2-dimensional) data points that I run through a classifier that uses higher order polynomial transformations. I want to visualize the results as a 2-dimensional scatterplot of the points with the classifier superimposed on top, preferably using ggplot2, as all other visualizations are made with it. Pretty much like the one that was used in the CaltechX online course on machine learning (the background color is optional).
I can display the points with colors and symbols and all, that's easy, but I can't figure out how to draw anything like the classifier (the intersection of the classifying hyperplane with the plane representing my threshold). The only thing I found was stat_function, and that only takes a function with a single argument.
Edit:
The example that was asked for in the comments:
sample data:
"","x","y","x","x","y","value"
"1",4.17338115745224,0.303530843229964,1.26674990184152,17.4171102853774,0.0921309727918932,-1
"2",4.85514814266935,3.452660451876,16.7631779801937,23.5724634872656,11.9208641959486,1
"3",3.51938610081561,3.41200957307592,12.0081790673332,12.3860785266141,11.6418093267617,1
"4",3.18545089452527,0.933340128976852,2.97310914874565,10.1470974014319,0.87112379635852,-16
"5",2.77556006214581,2.49701633118093,6.93061880335166,7.70373365857888,6.23509055818427,-1
"6",2.45974169578403,4.56341833807528,11.2248303614692,6.05032920997851,20.8247869282818,1
"7",2.73947941488586,3.35344674880616,9.18669833727041,7.50474746458339,11.2456050970786,-1
"8",2.01721803518012,3.55453519499861,7.17027250203368,4.06916860145595,12.6347204524838,-1
"9",3.52376445778646,1.47073399974033,5.1825201951431,12.4169159539591,2.1630584979922,-1
"10",3.77387718763202,0.509284208528697,1.92197605658768,14.2421490273294,0.259370405056702,-1
"11",4.15821685106494,1.03675272315741,4.31104264382058,17.2907673804804,1.0748562089743,-1
"12",2.57985028671101,3.88512040604837,10.0230289934507,6.65562750184287,15.0941605694935,1
"13",3.99800728890114,2.39457673509605,9.5735352407471,15.9840622821066,5.73399774026327,1
"14",2.10979392635636,4.58358959294856,9.67042948411309,4.45123041169019,21.0092935565863,1
"15",2.26988795562647,2.96687697409652,6.73447830932721,5.15239133109813,8.80235897942413,-1
"16",1.11802248633467,0.114183261757717,0.127659454208164,1.24997427994995,0.0130378172656312,-1
"17",0.310411276295781,2.09426849964075,0.650084557879535,0.0963551604515758,4.38596054858751,-1
"18",1.93197490065359,1.72926536411978,3.340897280049,3.73252701675543,2.99035869954433,-1
"19",3.45879891654477,1.13636834081262,3.93046958599847,11.9632899450912,1.29133300600123,-1
"20",0.310697768582031,0.730971727753058,0.227111284709427,0.0965331034018534,0.534319666774291,-1
"21",3.88408110360615,0.915658151498064,3.55649052359657,15.0860860193904,0.838429850404852,-1
"22",0.287852146429941,2.16121324687265,0.622109872005114,0.0828588582043242,4.67084269845782,-1
"23",2.80277011333965,1.22467750683427,3.4324895146344,7.85552030822994,1.4998349957458,-1
"24",0.579150241101161,0.57801398797892,0.334756940497835,0.335415001767533,0.334100170299295-,1
"25",2.37193428212777,1.58276639413089,3.7542178708388,5.62607223873297,2.50514945839009,-1
"26",0.372461311053485,2.51207412336953,0.935650421453748,0.138727428231681,6.31051640130279,-1
"27",3.56567220995203,1.03982002707198,3.70765737388213,12.7140183088242,1.08122568869998,-1
"28",0.634770628530532,2.26303249713965,1.43650656059435,0.402933750845047,5.12131608311011,-1
"29",2.43812176748179,1.91849716124125,4.67752968967431,5.94443775306852,3.68063135769073,-1
"30",1.08741064323112,3.01656032912433,3.28023980783858,1.18246190701233,9.0996362192467,-1
"31",0.98,2.74,2.6852,0.9604,7.5076,1
"32",3.16,1.78,5.6248,9.9856,3.1684,1
"33",4.26,4.28,18.2328,18.1476,18.3184,-1
The code to generate a classifier:
perceptron_train <- function(data, maxIter=10000) {
set.seed(839)
X <- as.matrix(data[1:5])
Y <- data["value"]
d <- dim(X)
X <- cbind(rep(1, d[1]), X)
W <- rep(0, d[2] + 1)
count <- 0
while (count < maxIter){
H <- sign(X %*% W)
indexs <- which(H != Y)
if (length(indexs) == 0){
break
} else {
i <- sample(indexs, 1)
W <- W + 0.1 * (X[i,] * Y[i,])
}
count <- count + 1
point <- as.data.frame(data[i,])
plot_it(data, point, W, paste("plot", sprintf("%05d", count), ".png", sep=""))
}
W
}
The code to generate the plot:
plot_it <- function(data, point, weights, name = "plot.png") {
line <- weights_to_line(weights)
point <- point
png(name)
p = ggplot() + geom_point(data = data, aes(x, y, color = value, size = 2)) + theme(legend.position = "none")
p = p + geom_abline(intercept = line[2], slope = line[1])
print(p)
dev.off()
}
This was solved using material from the question and answers from Issues plotting a fitted SVM model's decision boundary using ggplot2's stat_contour(). I skipped the call to geom_point for the grid entries and some of the aesthetic definitions like scale_fill_manual and scale_colour_manual. Removing the dots for the grid entries solved the problem with the vanishing contour line in my case.
library(e1071)  # provides svm(); ggplot2 is assumed to be loaded as above
train_and_plot_svm <- function(train, kernel = "sigmoid", type = "C", cost, gamma) {
  fit <- svm(as.factor(value) ~ x + y, data = train, kernel = kernel, type = type, cost = cost)
  grid <- expand.grid(x = seq(from = -0.1, to = 15, length = 100),
                      y = seq(from = -0.1, to = 15, length = 100))
  decisionValues <- as.vector(attributes(predict(fit, grid, decision.values = TRUE))$decision)
  p <- predict(fit, grid)
  grid$value <- p
  grid$z <- decisionValues
  p <- ggplot() + stat_contour(data = grid, aes(x = x, y = y, z = z), breaks = c(0))
  p <- p + geom_point(data = train, aes(x, y, colour = as.factor(value)), alpha = 0.7)
  p <- p + xlim(0, 15) + ylim(0, 15) + theme(legend.position = "none")
}
Note that this function doesn't return the result of the svm training but the ggplot2 object.
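A hypothetical usage sketch, assuming the sample data above has been read into a data frame named train with columns x, y and value (the cost value is arbitrary; note that the gamma argument is accepted but not forwarded to svm() in the function as written):
p <- train_and_plot_svm(train, kernel = "radial", cost = 10)
print(p)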
This is what I got:

Display regression equation and R^2 for each scatter plot when using facet_wrap

I have a data.frame (which I melted using the melt function), from which I produce multiple scatter plots and fit a regression line using the following:
ggplot(dat, aes(id, value)) + geom_point() + geom_smooth(method="lm", se=FALSE) + facet_wrap(variable~var1, scales="free")
I would like to add the regression equation and the R^2 in each of these scatter plots for the relevant regression (i.e. the one produced by geom_smooth in each scatter plot).
var1 above is just the name of one of the id columns of the melted data, and I am facing the same question with facet_grid instead of facet_wrap.
I actually solved this; please see below a worked-out example where the dependent variable is var1. This was a time series dataset, so please ignore the date part if it is not relevant to your problem.
library(plyr)
library(reshape2)  # for melt()
library(ggplot2)
rm(dat)
dat <- read.table("data.txt", header = TRUE, sep = ",")
dat <- transform(dat, date = as.POSIXct(strptime(date, "%Y-%m-%dT%H:%M:%OS")))
rm(dat.m)
dat.m <- melt(dat, id = c('ccy', 'date', 'var1'))
lm_eqn = function(df){
  m = lm(var1 ~ value, df)
  eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2,
                   list(a = format(coef(m)[1], digits = 2),
                        b = format(coef(m)[2], digits = 2),
                        r2 = format(summary(m)$r.squared, digits = 3)))
  as.character(as.expression(eq))
}
mymax = function(df){
max(df$value)
}
rm(regs)
regs <- ddply(dat.m, .(ccy,variable), lm_eqn)
regs.xpos <- ddply(dat.m, .(variable), function(df) (min(df$value)+max(df$value))/2)
regs.ypos <- ddply(dat.m, .(ccy,variable), function(df) min(df$var1) + 0.05*(max(df$var1)-min(df$var1)))
regs$y <- regs.ypos$V1
regs$x <- regs.xpos$V1
rm(gp)
gp <- ggplot(data = dat.m, aes(value, var1)) +
  geom_point(size = 1, alpha = 0.75) +
  geom_smooth() +
  geom_smooth(method = "lm", se = FALSE, color = "red") +
  geom_text(data = regs, size = 3, color = "red",
            aes(x = x, y = y, label = V1), parse = TRUE) +
  facet_grid(ccy ~ variable, scales = "free")
ggsave("data.png", gp, scale=1.5, width=11, height=8)
Nice solution. I'm surprised ggplot doesn't have a function built in to do this... I needed to display equations and R^2 values from polynomial fits (generated by the ns(x, order) function in the splines package), and have expanded your lm_eqn function to accommodate polynomials of varying orders.
Disclaimer: I'm still quite new to R coding, and I'm aware that this code is very messy. There must be a nicer way to do it, and I'm going to start another thread to ask people to refine the code, and possibly expand it to other fit models... You can follow it here: https://groups.google.com/forum/?fromgroups#!forum/ggplot2
lm_eqn = function(df,x.var,y.var,signif.figs,eq.plot=T,model.type,order){
if(missing(x.var) | missing(y.var) | class(x.var)!='character' | class(y.var)!='character') stop('x.var and y.var must be the names of the columns you want to use as x and y as a character string.' )
if(missing(model.type)) stop("model.type must be 'lin' (linear y~x model) or 'poly' (polynomial y~ns(x,order) model, generated by splines package).")
if(model.type=='poly' & missing(order)) stop("order must be specified if poly method is used.")
if(eq.plot==T) {
# Linear y=mx+c equation
if(model.type=='lin') {
fit = lm(df[[y.var]] ~ df[[x.var]]);
eq <- substitute(italic(y) == c + m %.% italic(x)*","~~italic(r)^2~"="~r2,
list(c = signif(coef(fit)[1], signif.figs),
m = signif(coef(fit)[2], signif.figs),
r2 = signif(summary(fit)$r.squared, signif.figs)))
eq <- as.character(as.expression(eq))
}
# polynomial expression generated with the ns(x,order) function [splines package]
if(model.type=='poly') {
fit = lm(df[[y.var]] ~ ns(df[[x.var]],order));
base = gsub('!c!',signif(coef(fit)[1],signif.figs),"italic(y) == !c! + ")
element.1 = "!m! %.% italic(x)~"
element.2 = " + !m! %.% italic(x)^!o!~"
element.r2 = gsub('!r2!',signif(summary(fit)$r.squared,signif.figs),"~~italic(r)^2~\"=\"~!r2!")
eq=""
for(o in 1:(order)) {
if(o==1) {
if(coef(fit)[(o+1)]<0) tmp=gsub("[+]","",base) else tmp=base
eq=paste(tmp,gsub('!m!',signif(coef(fit)[(o+1)],signif.figs),element.1),sep="")
}
if(o>1) {
if(coef(fit)[(o+1)]<0) tmp=gsub("[+]","",element.2) else tmp=element.2
eq=paste(eq,gsub('!o!',o,gsub('!m!',signif(coef(fit)[(o+1)],signif.figs),tmp)),sep="")
}
if(o==(order)) eq=paste(eq,"\",\"",element.r2,sep="")
}
}
}
if(eq.plot==F) {
# Linear y=mx+c equations
if(model.type=='lin') {
fit = lm(df[[y.var]] ~ df[[x.var]]);
eq <- substitute(italic(r)^2~"="~r2,
list(r2 = signif(summary(fit)$r.squared, signif.figs)))
eq <- as.character(as.expression(eq))
}
# polynomial expression generated with the ns() function [splines package]
if(model.type=='poly') {
fit = lm(df[[y.var]] ~ ns(df[[x.var]],order));
eq = gsub('!r2!',signif(summary(fit)$r.squared,signif.figs),"italic(r)^2~\"=\"~!r2!")
}
}
return(eq)
}
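A hypothetical call, following the first answer's ddply pattern (data frame and column names as in that example; splines is only needed when model.type = "poly"):
library(plyr)
library(splines)
regs <- ddply(dat.m, .(ccy, variable), lm_eqn,
              x.var = "value", y.var = "var1",
              signif.figs = 3, model.type = "lin")
# regs$V1 can then be placed on each facet with geom_text(..., label = V1, parse = TRUE) as above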
