I'm plotting an incidence curve using the survplot package in R. I'm using the xlim option to limit the x-axis of my graph to 0-28, but when I do this the x-axis always extends to 30. The maximum possible value in my data is 28. Is there a way I can trim the x-axis to 28 instead of 30?
Here is my code and an example of the graph with the extra x-axis.
survplot(Survobj,
ylim=c(0,10),
xlim=c(0,28),
ylab = "Cumulative Incidence, %",
conf=c("bands"),
fun=function(x) {100*(1-x)},
n.risk=FALSE,
time.inc=1,
cex.n.risk=0.9)
I would attach an image, but I need 10 reputation points to do so (sorry!)
The code for survplot.rms (which has the same parameters as you are using and does exhibit the behavior you're describing) is base-R graphics, and it uses the pretty function to build the x-axis:
pretty(c(0,28))
#[1] 0 5 10 15 20 25 30
So if you want to change its behavior, you will need to hack the code. Hacking R code isn't that hard, but it's unclear whether you're ready for that adventure, since you didn't even name the package the function comes from correctly. It is a fairly long function, and experience has taught me that newbies need a turnkey solution rather than being told to add a parameter and find the sections of code to tweak. Here's how to add a 'notpretty' parameter that determines whether just the max or the pretty function is applied to the xlim argument:
survplot2 <- function (fit, ..., xlim, ylim = if (loglog) c(-5, 1.5) else if (what ==
"survival" & missing(fun)) c(0, 1), xlab, ylab, time.inc,
what = c("survival", "hazard"), type = c("tsiatis", "kaplan-meier"),
conf.type = c("log", "log-log", "plain", "none"), conf.int = FALSE,
conf = c("bands", "bars"), add = FALSE, label.curves = TRUE,
abbrev.label = FALSE, levels.only = FALSE, lty, lwd = par("lwd"),
col = 1, col.fill = gray(seq(0.95, 0.75, length = 5)), adj.subtitle = TRUE,
loglog = FALSE, fun, n.risk = FALSE, logt = FALSE, dots = FALSE,
dotsize = 0.003, grid = NULL, srt.n.risk = 0, sep.n.risk = 0.056,
adj.n.risk = 1, y.n.risk, cex.n.risk = 0.6, pr = FALSE,notpretty=FALSE)
{
what <- match.arg(what)
polyg <- ordGridFun(grid = FALSE)$polygon
ylim <- ylim
type <- match.arg(type)
conf.type <- match.arg(conf.type)
conf <- match.arg(conf)
opar <- par(c("mar", "xpd"))
on.exit(par(opar))
psmfit <- inherits(fit, "psm")
if (what == "hazard" && !psmfit)
stop("what=\"hazard\" may only be used for fits from psm")
if (what == "hazard" & conf.int > 0) {
warning("conf.int may only be used with what=\"survival\"")
conf.int <- FALSE
}
if (loglog) {
fun <- function(x) logb(-logb(ifelse(x == 0 | x == 1,
NA, x)))
use.fun <- TRUE
}
else if (!missing(fun)) {
use.fun <- TRUE
if (loglog)
stop("cannot specify loglog=T with fun")
}
else {
fun <- function(x) x
use.fun <- FALSE
}
if (what == "hazard" & loglog)
stop("may not specify loglog=T with what=\"hazard\"")
if (use.fun | logt | what == "hazard") {
dots <- FALSE
grid <- NULL
}
cox <- inherits(fit, "cph")
if (cox) {
if (n.risk | conf.int > 0)
surv.sum <- fit$surv.summary
exactci <- !(is.null(fit$x) | is.null(fit$y))
ltype <- "s"
}
else {
if (n.risk)
stop("the n.risk option applies only to fits from cph")
exactci <- TRUE
ltype <- "l"
}
par(xpd = NA)
ciupper <- function(surv, d) ifelse(surv == 0, 0, pmin(1,
surv * exp(d)))
cilower <- function(surv, d) ifelse(surv == 0, 0, surv *
exp(-d))
labelc <- is.list(label.curves) || label.curves
units <- fit$units
if (missing(ylab)) {
if (loglog)
ylab <- "log(-log Survival Probability)"
else if (use.fun)
ylab <- ""
else if (what == "hazard")
ylab <- "Hazard Function"
else ylab <- "Survival Probability"
}
if (missing(xlab)) {
if (logt)
xlab <- paste("log Survival Time in ", units, "s",
sep = "")
else xlab <- paste(units, "s", sep = "")
}
maxtime <- fit$maxtime
maxtime <- max(pretty(c(0, maxtime)))
if (missing(time.inc))
time.inc <- fit$time.inc
if (missing(xlim))
xlim <- if (logt)
logb(c(maxtime/100, maxtime))
else c(0, maxtime)
if (length(grid) && is.logical(grid))
grid <- if (grid)
gray(0.8)
else NULL
if (is.logical(conf.int)) {
if (conf.int)
conf.int <- 0.95
else conf.int <- 0
}
zcrit <- qnorm((1 + conf.int)/2)
xadj <- Predict(fit, type = "model.frame", np = 5, factors = rmsArgs(substitute(list(...))))
info <- attr(xadj, "info")
varying <- info$varying
if (length(varying) > 1)
stop("cannot vary more than one predictor")
adjust <- if (adj.subtitle)
info$adjust
else NULL
if (length(xadj)) {
nc <- nrow(xadj)
covpres <- TRUE
}
else {
nc <- 1
covpres <- FALSE
}
y <- if (length(varying))
xadj[[varying]]
else ""
curve.labels <- NULL
xd <- xlim[2] - xlim[1]
if (n.risk & !add) {
mar <- opar$mar
if (mar[4] < 4) {
mar[4] <- mar[4] + 2
par(mar = mar)
}
}
lty <- if (missing(lty))
seq(nc + 1)[-2]
else rep(lty, length = nc)
col <- rep(col, length = nc)
lwd <- rep(lwd, length = nc)
i <- 0
if (levels.only)
y <- gsub(".*=", "", y)
abbrevy <- if (abbrev.label)
abbreviate(y)
else y
abbrevy <- if (is.factor(abbrevy))
as.character(abbrevy)
else format(abbrevy)
if (labelc || conf == "bands")
curves <- vector("list", nc)
for (i in 1:nc) {
ci <- conf.int
ay <- if (length(varying))
xadj[[varying]]
else ""
if (covpres) {
adj <- xadj[i, , drop = FALSE]
w <- survest(fit, newdata = adj, fun = fun, what = what,
conf.int = ci, type = type, conf.type = conf.type)
}
else w <- survest(fit, fun = fun, what = what, conf.int = ci,
type = type, conf.type = conf.type)
time <- w$time
if (logt)
time <- logb(time)
s <- !is.na(time) & (time >= xlim[1])
surv <- w$surv
if (is.null(ylim))
ylim <- range(surv, na.rm = TRUE)
stratum <- w$strata
if (is.null(stratum))
stratum <- 1
if (!is.na(stratum)) {
cl <- if (is.factor(ay))
as.character(ay)
else format(ay)
curve.labels <- c(curve.labels, abbrevy[i])
if (i == 1 & !add) {
plot(time, surv, xlab = xlab, xlim = xlim, ylab = ylab,
ylim = ylim, type = "n", axes = FALSE)
mgp.axis(1, at = if (logt)
pretty(xlim)
# This is the line that was changed -----------------------
else seq(xlim[1], if(notpretty){max(xlim)}else{max(pretty(xlim))}, time.inc),
# end of modifications ------------------------
labels = TRUE)
mgp.axis(2, at = pretty(ylim))
if (!logt & (dots || length(grid))) {
xlm <- pretty(xlim)
xlm <- c(xlm[1], xlm[length(xlm)])
xp <- seq(xlm[1], xlm[2], by = time.inc)
yd <- ylim[2] - ylim[1]
if (yd <= 0.1)
yi <- 0.01
else if (yd <= 0.2)
yi <- 0.025
else if (yd <= 0.4)
yi <- 0.05
else yi <- 0.1
yp <- seq(ylim[2], ylim[1] + if (n.risk &&
missing(y.n.risk))
yi
else 0, by = -yi)
if (dots)
for (tt in xp) symbols(rep(tt, length(yp)),
yp, circles = rep(dotsize, length(yp)),
inches = dotsize, add = TRUE)
else abline(h = yp, v = xp, col = grid, xpd = FALSE)
}
}
tim <- time[s]
srv <- surv[s]
if (conf.int > 0 && conf == "bands") {
blower <- w$lower[s]
bupper <- w$upper[s]
}
if (max(tim) > xlim[2]) {
if (ltype == "s") {
s.last <- srv[tim <= xlim[2] + 1e-06]
s.last <- s.last[length(s.last)]
k <- tim < xlim[2]
tim <- c(tim[k], xlim[2])
srv <- c(srv[k], s.last)
if (conf.int > 0 && conf == "bands") {
low.last <- blower[time <= xlim[2] + 1e-06]
low.last <- low.last[length(low.last)]
up.last <- bupper[time <= xlim[2] + 1e-06]
up.last <- up.last[length(up.last)]
blower <- c(blower[k], low.last)
bupper <- c(bupper[k], up.last)
}
}
else tim[tim > xlim[2]] <- NA
}
if (conf != "bands")
lines(tim, srv, type = ltype, lty = lty[i], col = col[i],
lwd = lwd[i])
if (labelc || conf == "bands")
curves[[i]] <- list(tim, srv)
if (pr) {
zest <- rbind(tim, srv)
dimnames(zest) <- list(c("Time", "Survival"),
rep("", length(srv)))
cat("\nEstimates for ", cl, "\n\n")
print(zest, digits = 3)
}
if (conf.int > 0) {
if (conf == "bands") {
polyg(x = c(tim, rev(tim)), y = c(blower, rev(bupper)),
col = col.fill[i], type = ltype)
}
else {
if (exactci) {
tt <- seq(0, maxtime, time.inc)
v <- survest(fit, newdata = adj, times = tt,
what = what, fun = fun, conf.int = ci,
type = type, conf.type = conf.type)
tt <- v$time
ss <- v$surv
lower <- v$lower
upper <- v$upper
if (!length(ylim))
ylim <- range(ss, na.rm = TRUE)
if (logt)
tt <- logb(ifelse(tt == 0, NA, tt))
}
else {
tt <- as.numeric(dimnames(surv.sum)[[1]])
if (logt)
tt <- logb(tt)
ss <- surv.sum[, stratum, "Survival"]^exp(w$linear.predictors)
se <- surv.sum[, stratum, "std.err"]
ss <- fun(ss)
lower <- fun(cilower(ss, zcrit * se))
upper <- fun(ciupper(ss, zcrit * se))
ss[is.infinite(ss)] <- NA
lower[is.infinite(lower)] <- NA
upper[is.infinite(upper)] <- NA
}
tt <- tt + xd * (i - 1) * 0.01
errbar(tt, ss, upper, lower, add = TRUE, lty = lty[i],
col = col[i])
}
}
if (n.risk) {
if (length(Y <- fit$y)) {
tt <- seq(max(0, xlim[1]), min(maxtime, xlim[2]),
by = time.inc)
ny <- ncol(Y)
if (!length(str <- fit$Strata))
Y <- Y[, ny - 1]
else Y <- Y[unclass(str) == unclass(stratum),
ny - 1]
nrisk <- rev(cumsum(table(cut(-Y, sort(unique(-c(tt,
range(Y) + c(-1, 1))))))[-length(tt) - 1]))
}
else {
if (is.null(surv.sum))
stop("you must use surv=T or y=T in fit to use n.risk=T")
tt <- as.numeric(dimnames(surv.sum)[[1]])
l <- (tt >= xlim[1]) & (tt <= xlim[2])
tt <- tt[l]
nrisk <- surv.sum[l, stratum, 2]
}
tt[1] <- xlim[1]
yd <- ylim[2] - ylim[1]
if (missing(y.n.risk))
y.n.risk <- ylim[1]
yy <- y.n.risk + yd * (nc - i) * sep.n.risk
nri <- nrisk
nri[tt > xlim[2]] <- NA
text(tt[1], yy, nri[1], cex = cex.n.risk, adj = adj.n.risk,
srt = srt.n.risk)
text(tt[-1], yy, nri[-1], cex = cex.n.risk, adj = 1)
text(xlim[2] + xd * 0.025, yy, adj = 0, curve.labels[i],
cex = cex.n.risk)
}
}
}
if (conf == "bands")
for (i in 1:length(y)) lines(curves[[i]][[1]], curves[[i]][[2]],
type = ltype, lty = lty[i], col = col[i], lwd = lwd[i])
if (labelc)
labcurve(curves, curve.labels, type = ltype, lty = lty,
col. = col, lwd = lwd, opts = label.curves)
if (length(adjust))
title(sub = paste("Adjusted to:", adjust), adj = 0, cex = 0.6)
invisible(list(adjust = adjust, curve.labels = curve.labels))
}
environment(survplot2) <- environment(rms:::survplot.rms)
Tested with the first example in rms::survplot using xlim=c(0,26) and xlim=c(0,28). The environment needs to be assigned because otherwise you get this error:
Error in Predict(fit, type = "model.frame", np = 5,
factors = rmsArgs(substitute(list(...)))) :
could not find function "rmsArgs"
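A hedged usage sketch, mirroring the call from the question (Survobj is assumed to be a cph() or psm() fit created with surv=TRUE; notpretty=TRUE makes the axis stop at max(xlim) rather than max(pretty(xlim))):
# Usage sketch for survplot2 -- assumptions noted above
survplot2(Survobj,
          xlim = c(0, 28),
          ylim = c(0, 10),
          ylab = "Cumulative Incidence, %",
          conf = "bands",
          fun = function(x) 100 * (1 - x),
          n.risk = FALSE,
          time.inc = 1,
          notpretty = TRUE)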
I've been generating calibration plots for my cph models of survival data. However, the default setting puts the "ideal" line in grey, which makes it difficult to discriminate. I've tried to specify the colour parameters in plot(), but this obviously only changes the line for "observed". What can I pass to plot() to change the colour of the "ideal" line in a calibration plot generated in rms?
Here is one option:
Let's say you have code to create a cph model of survival data and use calibrate from the rms package:
library(rms)
set.seed(1)
n <- 200
d.time <- rexp(n)
x1 <- runif(n)
x2 <- factor(sample(c('a', 'b', 'c'), n, TRUE))
f <- cph(Surv(d.time) ~ pol(x1,2) * x2, x=TRUE, y=TRUE, surv=TRUE,time.inc=1.5)
cal <- calibrate(f, u=1.5, cmethod='KM', m=50, B=20)
This will generate a calibrate object:
R> class(cal)
[1] "calibrate"
If you are using plot on this object, you can discover the function being called in rms:
R> getAnywhere("plot.calibrate.default")
A single object matching ‘plot.calibrate.default’ was found
It was found in the following places
registered S3 method for plot from namespace rms
namespace:rms
with value
function (x, xlab, ylab, xlim, ylim, legend = TRUE, subtitles = TRUE,
cex.subtitles = 0.75, riskdist = TRUE, scat1d.opts = list(nhistSpike = 200),
...)
You can create your own function based on this function, and alter the color of the ideal line. In this case, we make the ideal line green (and revise the text labels to match):
myplot <- function (x, xlab, ylab, subtitles = TRUE, conf.int = TRUE, cex.subtitles = 0.75,
riskdist = TRUE, add = FALSE, scat1d.opts = list(nhistSpike = 200),
par.corrected = NULL, ...)
{
at <- attributes(x)
u <- at$u
units <- at$units
if (length(par.corrected) && !is.list(par.corrected))
stop("par.corrected must be a list")
z <- list(col = "blue", lty = 1, lwd = 1, pch = 4)
if (!length(par.corrected))
par.corrected <- z
else for (n in setdiff(names(z), names(par.corrected))) par.corrected[[n]] <- z[[n]]
predicted <- at$predicted
if ("KM" %in% colnames(x)) {
type <- "stratified"
pred <- x[, "mean.predicted"]
cal <- x[, "KM"]
cal.corrected <- x[, "KM.corrected"]
se <- x[, "std.err"]
}
else {
type <- "smooth"
pred <- x[, "pred"]
cal <- x[, "calibrated"]
cal.corrected <- x[, "calibrated.corrected"]
se <- NULL
}
un <- if (u == 1)
paste(units, "s", sep = "")
else units
if (missing(xlab))
xlab <- paste("Predicted ", format(u), units, "Survival")
if (missing(ylab))
ylab <- paste("Fraction Surviving ", format(u), " ",
un, sep = "")
if (length(se) && conf.int) {
ciupper <- function(surv, d) ifelse(surv == 0, 0, pmin(1,
surv * exp(d)))
cilower <- function(surv, d) ifelse(surv == 0, 0, surv *
exp(-d))
errbar(pred, cal, cilower(cal, 1.959964 * se), ciupper(cal,
1.959964 * se), xlab = xlab, ylab = ylab, type = "b",
add = add, ...)
}
else if (add)
lines(pred, cal, type = if (type == "smooth")
"l"
else "b")
else plot(pred, cal, xlab = xlab, ylab = ylab, type = if (type ==
"smooth")
"l"
else "b", ...)
err <- NULL
if (riskdist && length(predicted)) {
do.call("scat1d", c(list(x = predicted), scat1d.opts))
if (type == "smooth") {
s <- !is.na(pred + cal.corrected)
err <- predicted - approxExtrap(pred[s], cal.corrected[s],
xout = predicted, ties = mean)$y
}
}
if (subtitles && !add) {
if (type == "smooth") {
Col <- par.corrected$col
substring(Col, 1, 1) <- toupper(substring(Col, 1,
1))
title(sub = sprintf("Black: observed Green: ideal\n%s : optimism corrected",
Col), adj = 0, cex.sub = cex.subtitles)
w <- if (length(err))
paste("B=", at$B, " based on ", at$what, "\nMean |error|=",
round(mean(abs(err)), 3), " 0.9 Quantile=",
round(quantile(abs(err), 0.9, na.rm = TRUE),
3), sep = "")
else paste("B=", at$B, "\nBased on ", at$what, sep = "")
title(sub = w, adj = 1, cex.sub = cex.subtitles)
}
else {
title(sub = paste("n=", at$n, " d=", at$d, " p=",
at$p, ", ", at$m, " subjects per group\nGreen: ideal",
sep = ""), adj = 0, cex.sub = cex.subtitles)
title(sub = paste("X - resampling optimism added, B=",
at$B, "\nBased on ", at$what, sep = ""), adj = 1,
cex.sub = cex.subtitles)
}
}
abline(0, 1, col = "green")
if (type == "stratified")
points(pred, cal.corrected, pch = par.corrected$pch,
col = par.corrected$col)
else lines(pred, cal.corrected, col = par.corrected$col,
lty = par.corrected$lty, lwd = par.corrected$lwd)
invisible()
}
Then you can use your custom function with your calibrate object:
myplot(cal)
I am trying to explore the EnQuireR package for questionnaire analysis (on the tea data), but ENbarplot and XvsYbarplot give errors:
ENbarplot(tea, 20, numr=1, numc=1, spl=TRUE)
Error in hsv(h = a * m, s = 0.4 + (cont[j]/max) * 0.6, v = 1, 1, 1) :
unused argument (1)
&
XvsYbarplot("socio.professional.category","sex",tea, legend.text=TRUE)
Error in hsv(h = a * i, s = 1, v = 1, 1, 1) : unused argument (1)
Also, I am having trouble interpreting the output of the chisq.desc() function. Do the colored cells represent a significant association between the corresponding variables? Can anyone please explain in detail?
The issue is with the hsv function in the grDevices graphics utilities: an extra argument is passed to it, making the call fail. I tried fixing it; the rewritten functions below should work if you're still interested in using the package.
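First, a minimal illustration of the failing call; the extra positional argument appears to correspond to the old gamma argument of hsv(), which current versions of R no longer accept (a sketch, assuming a modern R where the signature is hsv(h, s, v, alpha)):
hsv(h = 0.5, s = 1, v = 1, 1, 1)  # Error in hsv(...): unused argument (1) -- one positional too many
hsv(h = 0.5, s = 1, v = 1, 1)     # works: the remaining positional 1 is alpha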
barplot_function1 <- function (dataset, X, spl = FALSE, numr = NULL, numc = NULL,
cex = 1, colour = NULL)
{
mult = length(X)
col = rep(0)
count_mod <- rep(0)
a <- 1/mult
for (m in 1:mult) {
if (spl) {
cont <- sort(table(dataset[, X[m]]))
}
else {
cont <- table(dataset[, X[m]])
}
NR <- dim(cont)
count_mod <- c(count_mod, NR)
max <- max(cont)
coli <- rep(0, NR)
for (j in 1:NR) {
if (cont[j] == max) {
coli[j] <- hsv(h = a * m, s = 1, v = 1, 1)
}
else {
coli[j] <- hsv(h = a * m, s = 0.4 + (cont[j]/max) *
0.6, v = 1, 1)
}
}
col <- c(col, coli)
}
col <- col[-1]
count_mod <- count_mod[-1]
summ <- cumsum(count_mod)
na = matrix(0, 1, mult)
if (is.null(numr)) {
if (is.null(numc)) {
numr = numc = 2
}
}
par(mfrow = c(numr, numc), yaxt = "n")
tpolice <- par("cex")
par(xpd = T, mar = par()$mar + c(0, 1, 0, 0))
for (m in 1:mult) {
for (i in 1:((dim(dataset)[2])/(numr * numc))) {
if (m == ((numr * numc) * i) + 1) {
x11()
par(mfrow = c(numr, numc), yaxt = "n")
}
}
k = 0
for (i in 1:length(dataset[, X[m]])) {
if ((is.na(dataset[, X[m]])[[i]]) == TRUE) {
k = k + 1
}
}
na[m] = k/length(dataset[, X[m]])
if (spl == TRUE) {
coord = barplot(sort(table(dataset[, X[m]])), beside = TRUE,
las = 2, horiz = TRUE, main = names(dataset)[X[m]],
col = col[(summ[m] - count_mod[m] + 1:summ[m])])
text(x = 2, y = coord, labels = names(sort(table(dataset[,
X[m]]))), adj = 0, cex = cex, col = colour)
}
if (spl == FALSE) {
coord = barplot(table(dataset[, X[m]]), beside = TRUE,
las = 2, horiz = TRUE, main = names(dataset)[X[m]],
col = col[(summ[m] - count_mod[m] + 1:summ[m])])
text(x = 2, y = coord, labels = levels(dataset[,
X[m]]), adj = 0, cex = cex, col = colour)
}
mtext(paste(c("Percentage of missing values =", round(na[m],
2) * 100, "%"), collapse = " "), side = 3, line = -0.1,
cex = tpolice, adj = 0)
}
}
ENbarplotfixed <- function (dataset, X, spl = FALSE, numr = NULL, numc = NULL,
report = FALSE)
{
if (report == FALSE) {
barplot_function1(dataset, X, spl, numr, numc)
}
if (report == TRUE) {
assign("X", X, envir = .GlobalEnv)
assign("dataset", dataset, envir = .GlobalEnv)
a = getwd()
dir.create(paste(a, "/EnQuireR/", sep = ""))
file.copy(paste(.libPaths()[1], "/EnQuireR/Sweave/sty/fancyvrb.sty",
sep = ""), paste(a, "/EnQuireR/fancyvrb.sty", sep = ""))
file.copy(paste(.libPaths()[1], "/EnQuireR/Sweave/sty/Sweave.sty",
sep = ""), paste(a, "/EnQuireR/Sweave.sty", sep = ""))
file.copy(paste(.libPaths()[1], "/EnQuireR/Sweave/sty/upquote.sty",
sep = ""), paste(a, "/EnQuireR/upquote.sty", sep = ""))
file.copy(paste(.libPaths()[1], "/EnQuireR/Sweave/sty/algorithmic.sty",
sep = ""), paste(a, "/EnQuireR/algorithmic.sty",
sep = ""))
setwd(paste(a, "/EnQuireR", sep = ""))
Sweave(paste(.libPaths()[1], "/EnQuireR/Sweave/barplot/Univariate_report.Rnw",
sep = ""), driver = RweaveLatex(), syntax = getOption("SweaveSyntax"))
tools::texi2dvi(paste(a, "/EnQuireR/Univariate_report.tex",
sep = ""), pdf = TRUE)
setwd(a)
}
}
#plot
ENbarplotfixed(tea,20,spl=T,numr=1,numc=1)
#plot upon condition
XvsYbarplotfixed <- function (var1, var2, dataset, width = 1, space = NULL, names.arg = NULL,
legend.text = NULL, horiz = FALSE, density = NULL, angle = 45,
col = NULL, border = par("fg"), main = NULL, sub = NULL,
xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, xpd = TRUE,
log = "", axes = TRUE, axisnames = TRUE, cex.axis = par("cex.axis"),
cex.names = par("cex.axis"), inside = TRUE, plot = TRUE,
axis.lty = 0, offset = 0, add = FALSE, ...)
{
dataset <- as.data.frame(dataset)
if (is.character(var1) & is.character(var2)) {
num_var1 <- match(var1, names(dataset))
num_var2 <- match(var2, names(dataset))
height <- table(dataset[, num_var1], dataset[, num_var2])
}
else {
height <- table(var1, var2)
}
if (!missing(inside))
.NotYetUsed("inside", error = FALSE)
if (is.null(space))
space <- if (is.matrix(height))
c(0, 1)
else 0.2
space <- space * mean(width)
if (plot && axisnames && is.null(names.arg))
names.arg <- if (is.matrix(height))
colnames(height)
else names(height)
if (is.vector(height) || (is.array(height) && (length(dim(height)) ==
1))) {
height <- cbind(height)
if (is.null(col))
col <- rep(0)
NR <- length(height)
max <- max(height)
for (i in 1:NR) {
if (height[i] == max) {
coli <- hsv(h = 1, s = 1, v = 1, 1)
col <- cbind(col, coli)
}
else {
coli <- hsv(h = 1, s = 0.4 + (height[i]/max) *
0.6, v = 1, 1)
col <- c(col, coli)
}
}
col <- col[-1]
}
else if (is.matrix(height)) {
if (is.null(col))
NR <- nrow(height)
NC <- ncol(height)
col = rep(0)
a <- 1/NC
for (i in 1:NC) {
max <- max(height[, i])
coli <- rep(0, NR)
for (j in 1:NR) {
if (height[j, i] == max) {
coli[j] <- hsv(h = a * i, s = 1, v = 1, 1)
}
else {
coli[j] <- hsv(h = a * i, s = 0.4 + (height[j,
i]/max) * 0.6, v = 1, 1)
}
}
col <- c(col, coli)
}
col <- col[-1]
}
else stop("'height' must be a vector or a matrix")
if (is.logical(legend.text))
legend.text <- if (legend.text && is.matrix(height))
rownames(height)
stopifnot(is.character(log))
logx <- logy <- FALSE
if (log != "") {
logx <- length(grep("x", log)) > 0L
logy <- length(grep("y", log)) > 0L
}
if ((logx || logy) && !is.null(density))
stop("Cannot use shading lines in bars when log scale is used")
NR <- nrow(height)
NC <- ncol(height)
if (length(space) == 2)
space <- rep.int(c(space[2], rep.int(space[1], NR - 1)),
NC)
width <- rep(width, length.out = NR)
offset <- rep(as.vector(offset), length.out = length(width))
delta <- width/2
w.r <- cumsum(space + width)
w.m <- w.r - delta
w.l <- w.m - delta
log.dat <- (logx && horiz) || (logy && !horiz)
if (log.dat) {
if (min(height + offset, na.rm = TRUE) <= 0)
stop("log scale error: at least one 'height + offset' value <= 0")
if (logx && !is.null(xlim) && min(xlim) <= 0)
stop("log scale error: 'xlim' <= 0")
if (logy && !is.null(ylim) && min(ylim) <= 0)
stop("log scale error: 'ylim' <= 0")
rectbase <- if (logy && !horiz && !is.null(ylim))
ylim[1]
else if (logx && horiz && !is.null(xlim))
xlim[1]
else 0.9 * min(height, na.rm = TRUE)
}
else rectbase <- 0
rAdj <- offset + (if (log.dat)
0.9 * height
else -0.01 * height)
delta <- width/2
w.r <- cumsum(space + width)
w.m <- w.r - delta
w.l <- w.m - delta
num_mod <- nlevels(var1)
if (horiz) {
if (is.null(xlim))
xlim <- range(rAdj, height + offset, na.rm = TRUE)
if (is.null(ylim))
ylim <- c(min(w.l), max(w.r) + num_mod + (num_mod -
1))
}
else {
if (is.null(xlim))
xlim <- c(min(w.l), max(w.r))
if (is.null(ylim))
ylim <- range(rAdj, height + offset + num_mod * 5,
na.rm = TRUE)
}
w.m <- matrix(w.m, ncol = NC)
par(mar = par("mar") + c(1, 0, 0, 0))
if (plot) {
opar <- if (horiz)
par(xaxs = "i", xpd = xpd)
else par(yaxs = "i", xpd = xpd)
on.exit(par(opar))
if (!add) {
plot.new()
if (horiz) {
if (is.character(attributes(var1)$levels) ==
TRUE) {
if (max(nchar(attributes(var1)$levels)) > 8) {
par(mar = par("mar") + c(0, round(max(nchar(attributes(var1)$levels))/3),
0, 0))
}
}
else {
par(mar = par("mar") + c(0, 5, 0, 0))
}
plot.window(xlim, ylim, log = log, ...)
}
else plot.window(xlim, ylim, log = log, ...)
}
xyrect <- function(x1, y1, x2, y2, horizontal = TRUE,
...) {
if (horizontal)
rect(x1, y1, x2, y2, ...)
else rect(y1, x1, y2, x2, ...)
}
xyrect(rectbase + offset, w.l, c(height) + offset, w.r,
horizontal = horiz, angle = angle, density = density,
col = col, border = border)
if (axisnames && !is.null(names.arg)) {
at.l <- if (length(names.arg) != length(w.m)) {
if (length(names.arg) == NC)
colMeans(w.m)
else stop("incorrect number of names")
}
else w.m
if (!horiz)
if (is.character(attributes(var1)$levels) ==
TRUE) {
for (i in 1:nlevels(var2)) {
if (nchar(attributes(var2)$levels[i]) > 11)
names.arg[i] <- substring(attributes(var2)$levels[i],
1, 11)
}
}
if (horiz) {
axis(2, at = at.l, labels = names.arg, lty = axis.lty,
cex.axis = cex.names, las = 2)
}
else {
axis(1, at = at.l, labels = names.arg, lty = axis.lty,
cex.axis = cex.names, las = 0)
}
}
if (!is.null(legend.text)) {
legend.col <- rep(col, length.out = length(legend.text))
if (!horiz) {
legend.text <- legend.text
legend.col <- legend.col
density <- rev(density)
angle <- rev(angle)
}
num.legend <- c("1st", "2nd", "3rd")
for (i in 4:20) {
num.legendi <- c(paste(i, "th"))
num.legend <- c(num.legend, num.legendi)
}
num.legend <- num.legend[1:dim(height)[1]]
xy <- par("usr")
if (horiz) {
legend2(xy[2] - xinch(0.1), xy[4] - yinch(0.1),
legend = paste(num.legend, "bar:", legend.text),
angle = angle, density = density, fill = legend.col,
bty = "n", cex = 1 - 0.04 * num_mod, xjust = 1,
yjust = 1)
}
else {
legend2(xy[2] - xinch(0.1), xy[4] - yinch(0.1),
legend = paste(num.legend, "bar:", legend.text),
angle = angle, density = density, fill = legend.col,
bty = "n", xjust = 1, yjust = 1)
}
}
if (is.character(var1) & is.character(var2)) {
title(main = paste(names(dataset[num_var1]), "depending on",
names(dataset[num_var2])), sub = sub, xlab = xlab,
ylab = ylab, ...)
}
else {
for (i in 1:length(dataset)) {
a <- match(var1, dataset[, i])
b <- match(var2, dataset[, i])
if (any(is.na(a)) == FALSE) {
rep1 = i
}
if (any(is.na(b)) == FALSE) {
rep2 = i
}
}
title(main = paste(names(dataset[rep1]), "depending on",
names(dataset[rep2])), sub = sub, xlab = xlab,
ylab = ylab, ...)
}
if (axes)
if (horiz) {
axis(1, cex.axis = cex.axis, las = 0, ...)
}
else {
axis(2, cex.axis = cex.axis, las = 2, ...)
}
invisible(w.m)
}
else w.m
}
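A hedged usage sketch, mirroring the call from the question (it assumes the tea questionnaire data that ships with EnQuireR is available via data(tea); if internal helpers such as legend2 are not found, assign the package namespace as the function's environment, as was done for survplot2 above):
library(EnQuireR)
data(tea)                                                  # the questionnaire data from the question
environment(XvsYbarplotfixed) <- asNamespace("EnQuireR")   # so internal helpers like legend2 resolve
XvsYbarplotfixed("socio.professional.category", "sex", tea, legend.text = TRUE)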
I'm having some trouble when plotting the "bgnbd.PlotFrequencyInCalibration" in the "BTYD" package.
There are no NAs in the dataset and other plots work without error.
Below is my code for the plots:
CustData <- read.csv("~/ltv/CustData")
cal.cbs <- cbind(CustData$t.x, CustData$x, CustData$T.cal, CustData$x.star)
colnames(cal.cbs) <- c("t.x", "x", "T.cal", "x.star")
est.params<-c(0.0313,0.9165,1.088,0.7903)
bgnbd.PlotFrequencyInCalibration(est.params,cal.cbs,7)
Error in plot.window(xlim, ylim, log = log, ...) :
need finite 'ylim' values
Any help would be appreciated. Thank you.
Kara
I fixed this for pnbd.PlotFrequencyInCalibration; repeat the same for bgnbd. If you look at the actual function for pnbd.PlotFrequencyInCalibration (see https://github.com/cran/BTYD/blob/master/R/pnbd.R):
pnbd.PlotFrequencyInCalibration <- function(params, cal.cbs, censor, plotZero = TRUE,
xlab = "Calibration period transactions", ylab = "Customers", title = "Frequency of Repeat Transactions") {
tryCatch(x <- cal.cbs[, "x"], error = function(e) stop("Error in pnbd.PlotFrequencyInCalibration: cal.cbs must have a frequency column labelled \"x\""))
tryCatch(T.cal <- cal.cbs[, "T.cal"], error = function(e) stop("Error in pnbd.PlotFrequencyInCalibration: cal.cbs must have a column for length of time observed labelled \"T.cal\""))
dc.check.model.params(c("r", "alpha", "s", "beta"), params, "pnbd.PlotFrequencyInCalibration")
if (censor > max(x))
stop("censor too big (> max freq) in PlotFrequencyInCalibration.")
n.x <- rep(0, max(x) + 1)
custs = nrow(cal.cbs)
for (ii in unique(x)) {
n.x[ii + 1] <- sum(ii == x)
}
n.x.censor <- sum(n.x[(censor + 1):length(n.x)])
n.x.actual <- c(n.x[1:censor], n.x.censor)
T.value.counts <- table(T.cal)
T.values <- as.numeric(names(T.value.counts))
n.T.values <- length(T.values)
total.probability <- 0
n.x.expected <- rep(0, length(n.x.actual))
for (ii in 1:(censor)) {
this.x.expected <- 0
for (T.idx in 1:n.T.values) {
T <- T.values[T.idx]
if (T == 0)
next
n.T <- T.value.counts[T.idx]
expected.given.x.and.T <- n.T * pnbd.pmf(params, T, ii - 1)
this.x.expected <- this.x.expected + expected.given.x.and.T
total.probability <- total.probability + expected.given.x.and.T/custs
}
n.x.expected[ii] <- this.x.expected
}
n.x.expected[censor + 1] <- custs * (1 - total.probability)
col.names <- paste(rep("freq", length(censor + 1)), (0:censor), sep = ".")
col.names[censor + 1] <- paste(col.names[censor + 1], "+", sep = "")
censored.freq.comparison <- rbind(n.x.actual, n.x.expected)
colnames(censored.freq.comparison) <- col.names
cfc.plot <- censored.freq.comparison
if (plotZero == FALSE)
cfc.plot <- cfc.plot[, -1]
n.ticks <- ncol(cfc.plot)
if (plotZero == TRUE) {
x.labels <- 0:(n.ticks - 1)
x.labels[n.ticks] <- paste(n.ticks - 1, "+", sep = "")
} else {
x.labels <- 1:(n.ticks)
x.labels[n.ticks] <- paste(n.ticks, "+", sep = "")
}
ylim <- c(0, ceiling(max(cfc.plot,na.rm = TRUE) * 1.1))
barplot(cfc.plot, names.arg = x.labels, beside = TRUE, ylim = ylim, main = title,
xlab = xlab, ylab = ylab, col = 1:2)
legend("topright", legend = c("Actual", "Model"), col = 1:2, lwd = 2)
return(censored.freq.comparison)
}
There is a line:
ylim <- c(0, ceiling(max(cfc.plot) * 1.1))
Add na.rm = TRUE to it:
ylim <- c(0, ceiling(max(cfc.plot,na.rm = TRUE) * 1.1))
Run the function again; it should work now.
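For the BG/NBD case from the question, a hedged sketch: bgnbd.PlotFrequencyInCalibration.fixed below is a hypothetical name for a copy of BTYD's bgnbd.PlotFrequencyInCalibration with the same na.rm = TRUE change applied to its ylim line.
library(BTYD)
# est.params and cal.cbs are the objects built in the question;
# bgnbd.PlotFrequencyInCalibration.fixed is the hypothetical patched copy
bgnbd.PlotFrequencyInCalibration.fixed(est.params, cal.cbs, censor = 7)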
Take plot.acf as an example. Both acf and pacf call this function internally. How can I plot them side by side?
Example:
TS <- ts.union(mdeaths, fdeaths)
acf(TS)
pacf(TS)
I tried to use par(mfrow = c(2,4)) and layout() to combine them, but stats:::plot.acf overrides this. The expected output would be the ACF and PACF panels arranged side by side.
A different approach than my other answer: Plot the ACF using ggplot2.
ggacf <- function(x, ci=0.95, type="correlation", xlab="Lag", ylab=NULL,
ylim=NULL, main=NULL, ci.col="blue", lag.max=NULL) {
x <- as.data.frame(x)
x.acf <- acf(x, plot=F, lag.max=lag.max, type=type)
ci.line <- qnorm((1 - ci) / 2) / sqrt(x.acf$n.used)
d.acf <- data.frame(lag=x.acf$lag, acf=x.acf$acf)
g <- ggplot(d.acf, aes(x=lag, y=acf)) +
geom_hline(yintercept=0) +
geom_segment(aes(xend=lag, yend=0)) +
geom_hline(yintercept=ci.line, color=ci.col, linetype="dashed") +
geom_hline(yintercept=-ci.line, color=ci.col, linetype="dashed") +
theme_bw() +
xlab("Lag") +
ggtitle(ifelse(is.null(main), "", main)) +
if (is.null(ylab))
ylab(ifelse(type=="partial", "PACF", "ACF"))
else
ylab(ylab)
g
}
This seeks to create a similar interface to plot.acf(). Then you can use all of the great features available to ggplot2 plots from the gridExtra package.
library(ggplot2)
library(gridExtra)
grid.arrange(ggacf(lh), ggacf(lh, type="partial"), ncol=2)
This gives the ACF and PACF plotted side by side.
Unfortunately grid.arrange() doesn't work with base graphics, hence the ggplot2 suggestion.
This isn't an ideal solution, but you can redefine what it means to plot an ACF/PACF by defining plot.acf().
First store the existing version.
old.plot.acf <- stats:::plot.acf
Now you can use stats:::plot.acf to get the source and copy/paste into the editor. Remove the part that resets mfrow.
plot.acf <- function(x, ci = 0.95, type = "h", xlab = "Lag", ylab = NULL,
ylim = NULL, main = NULL, ci.col = "blue",
ci.type = c("white", "ma"), max.mfrow = 6,
ask = Npgs > 1 && dev.interactive(),
mar = if (nser > 2) c(3, 2, 2, 0.8) else par("mar"),
oma = if (nser > 2) c(1, 1.2, 1, 1) else par("oma"),
mgp = if (nser > 2) c(1.5, 0.6, 0) else par("mgp"),
xpd = par("xpd"), cex.main = if (nser > 2) 1 else
par("cex.main"), verbose = getOption("verbose"), ...)
{
ci.type <- match.arg(ci.type)
if ((nser <- ncol(x$lag)) < 1L)
stop("x$lag must have at least 1 column")
if (is.null(ylab))
ylab <- switch(x$type, correlation = "ACF", covariance = "ACF (cov)",
partial = "Partial ACF")
if (is.null(snames <- x$snames))
snames <- paste("Series ", if (nser == 1L)
x$series
else 1L:nser)
with.ci <- ci > 0 && x$type != "covariance"
with.ci.ma <- with.ci && ci.type == "ma" && x$type == "correlation"
if (with.ci.ma && x$lag[1L, 1L, 1L] != 0L) {
warning("can use ci.type=\"ma\" only if first lag is 0")
with.ci.ma <- FALSE
}
clim0 <- if (with.ci)
qnorm((1 + ci)/2)/sqrt(x$n.used)
else c(0, 0)
Npgs <- 1L
nr <- nser
if (nser > 1L) {
sn.abbr <- if (nser > 2L)
abbreviate(snames)
else snames
if (nser > max.mfrow) {
Npgs <- ceiling(nser/max.mfrow)
nr <- ceiling(nser/Npgs)
}
### NOT INCLUDED: mfrow = rep(nr, 2L)
opar <- par(mar = mar, oma = oma,
mgp = mgp, ask = ask, xpd = xpd, cex.main = cex.main)
on.exit(par(opar))
if (verbose) {
message("par(*) : ", appendLF = FALSE, domain = NA)
str(par("mfrow", "cex", "cex.main", "cex.axis", "cex.lab",
"cex.sub"))
}
}
if (is.null(ylim)) {
ylim <- range(x$acf[, 1L:nser, 1L:nser], na.rm = TRUE)
if (with.ci)
ylim <- range(c(-clim0, clim0, ylim))
if (with.ci.ma) {
for (i in 1L:nser) {
clim <- clim0 * sqrt(cumsum(c(1, 2 * x$acf[-1,
i, i]^2)))
ylim <- range(c(-clim, clim, ylim))
}
}
}
for (I in 1L:Npgs) for (J in 1L:Npgs) {
dev.hold()
iind <- (I - 1) * nr + 1L:nr
jind <- (J - 1) * nr + 1L:nr
if (verbose)
message("Page [", I, ",", J, "]: i =", paste(iind,
collapse = ","), "; j =", paste(jind, collapse = ","),
domain = NA)
for (i in iind) for (j in jind) if (max(i, j) > nser) {
frame()
box(col = "light gray")
}
else {
clim <- if (with.ci.ma && i == j)
clim0 * sqrt(cumsum(c(1, 2 * x$acf[-1, i, j]^2)))
else clim0
plot(x$lag[, i, j], x$acf[, i, j], type = type, xlab = xlab,
ylab = if (j == 1)
ylab
else "", ylim = ylim, ...)
abline(h = 0)
if (with.ci && ci.type == "white")
abline(h = c(clim, -clim), col = ci.col, lty = 2)
else if (with.ci.ma && i == j) {
clim <- clim[-length(clim)]
lines(x$lag[-1, i, j], clim, col = ci.col, lty = 2)
lines(x$lag[-1, i, j], -clim, col = ci.col, lty = 2)
}
title(if (!is.null(main))
main
else if (i == j)
snames[i]
else paste(sn.abbr[i], "&", sn.abbr[j]), line = if (nser >
2)
1
else 2)
}
if (Npgs > 1) {
mtext(paste("[", I, ",", J, "]"), side = 1, line = -0.2,
adj = 1, col = "dark gray", cex = 1, outer = TRUE)
}
dev.flush()
}
invisible()
}
Now that this is defined locally, you can set mfrow as needed, do your plotting, and then reset the function (or remove your copy with rm(plot.acf)).
plot.acf <- old.plot.acf
To avoid having to change plot.pacf() as well, you can just use acf(..., type = "partial"), which gives the PACF, as in the sketch below.
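Putting the pieces together, a minimal sketch of the workflow with the series from the question (it assumes the modified plot.acf() above has been sourced into the global environment, where S3 dispatch will find it):
# Minimal workflow sketch -- assumptions noted above
TS <- ts.union(mdeaths, fdeaths)
par(mfrow = c(2, 4))       # room for the 2x2 ACF panels plus the 2x2 PACF panels
acf(TS)                    # dispatches to the local plot.acf(), so mfrow is respected
acf(TS, type = "partial")  # the PACF, without redefining plot.pacf()
When you are done, restore the original with plot.acf <- old.plot.acf as shown above.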
You can use the PerformanceAnalytics package:
library(PerformanceAnalytics)
chart.ACFplus(TS)
I am plotting a correlation plot with corrplot. I also want to plot the correlation coefficients:
require(corrplot)
test <- matrix(data = rnorm(400), nrow=20, ncol=20)
corrplot(cor(test), method = "color", addCoef.col="grey", order = "AOE")
But they are too big in the plot. Is there any way to make the font of the coefficients smaller? I've been looking at ?corrplot but there are only parameters to change the legend and axis font sizes (cl.cex and tl.cex). pch.cex doesn't work either.
The option to use is number.cex=.
As in the following:
corrplot(cor(test),
method = "color",
addCoef.col="grey",
order = "AOE",
number.cex=0.75)
To make it dynamic, try number.cex = 7/ncol(df), where df is the data frame on which the correlation was run.
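For example, a minimal sketch of that rule of thumb with the question's 20-column matrix (df here is just as.data.frame of that matrix):
# Dynamic coefficient label size, scaled by the number of columns
test <- matrix(data = rnorm(400), nrow = 20, ncol = 20)
df <- as.data.frame(test)
corrplot(cor(df), method = "color", addCoef.col = "grey",
         order = "AOE", number.cex = 7 / ncol(df))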
It is far from an ideal answer, more of a dirty hack, but it works (thanks user20650 for the idea):
cex.before <- par("cex")
par(cex = 0.7)
corrplot(cor(envV), p.mat = cor1[[1]], insig = "blank", method = "color",
addCoef.col="grey",
order = "AOE", tl.cex = 1/par("cex"),
cl.cex = 1/par("cex"), addCoefasPercent = TRUE)
par(cex = cex.before)
I had exactly the same problem a little while ago when I had to do a corrplot similar to yours. After a lot of searching I found a solution that involves printing the correlation plot to a png file and adjusting the parameters there. For example:
library(corrplot)
test <- matrix(data = rnorm(400), nrow=20, ncol=20)
png(height=1200, width=1500, pointsize=15, file="overlap.png")
corrplot(cor(test), method = "color", addCoef.col="grey", order = "AOE")
dev.off()
The part that increases/decreases the font inside the cells is the pointsize parameter. Setting it to 15, you can see that the numbers now fit the cells.
You may also find this link helpful; it certainly helped me.
I would define my own size value, since the function simply omitted a way to set a size for that text. Below is the function recreated with an extra number.cex parameter added at the end, which now controls the number label size.
corrplot2 <- function (corr, method = c("circle", "square", "ellipse", "number",
"shade", "color", "pie"), type = c("full", "lower", "upper"),
add = FALSE, col = NULL, bg = "white", title = "", is.corr = TRUE,
diag = TRUE, outline = FALSE, mar = c(0, 0, 0, 0), addgrid.col = NULL,
addCoef.col = NULL, addCoefasPercent = FALSE, order = c("original",
"AOE", "FPC", "hclust", "alphabet"), hclust.method = c("complete",
"ward", "single", "average", "mcquitty", "median", "centroid"),
addrect = NULL, rect.col = "black", rect.lwd = 2, tl.pos = NULL,
tl.cex = 1, tl.col = "red", tl.offset = 0.4, tl.srt = 90,
cl.pos = NULL, cl.lim = NULL, cl.length = NULL, cl.cex = 0.8,
cl.ratio = 0.15, cl.align.text = "c", cl.offset = 0.5, addshade = c("negative",
"positive", "all"), shade.lwd = 1, shade.col = "white",
p.mat = NULL, sig.level = 0.05, insig = c("pch", "p-value",
"blank", "n"), pch = 4, pch.col = "black", pch.cex = 3,
plotCI = c("n", "square", "circle", "rect"), lowCI.mat = NULL,
uppCI.mat = NULL, number.cex = 0.7, ...)
{
method <- match.arg(method)
type <- match.arg(type)
order <- match.arg(order)
hclust.method <- match.arg(hclust.method)
plotCI <- match.arg(plotCI)
insig <- match.arg(insig)
if (!is.matrix(corr) & !is.data.frame(corr))
stop("Need a matrix or data frame!")
if (is.null(addgrid.col)) {
addgrid.col <- ifelse(method == "color" | method == "shade",
"white", "grey")
}
if (any(corr < cl.lim[1]) | any(corr > cl.lim[2]))
stop("color limits should cover matrix")
if (is.null(cl.lim)) {
if (is.corr)
cl.lim <- c(-1, 1)
if (!is.corr)
cl.lim <- c(min(corr), max(corr))
}
intercept <- 0
zoom <- 1
if (!is.corr) {
if (max(corr) * min(corr) < 0) {
intercept <- 0
zoom <- 1/max(abs(cl.lim))
}
if (min(corr) >= 0) {
intercept <- -cl.lim[1]
zoom <- 1/(diff(cl.lim))
}
if (max(corr) <= 0) {
intercept <- -cl.lim[2]
zoom <- 1/(diff(cl.lim))
}
corr <- (intercept + corr) * zoom
}
cl.lim2 <- (intercept + cl.lim) * zoom
int <- intercept * zoom
if (min(corr) < -1 - .Machine$double.eps || max(corr) > 1 +
.Machine$double.eps) {
stop("The matrix is not in [-1, 1]!")
}
if (is.null(col)) {
col <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D",
"#F4A582", "#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
"#4393C3", "#2166AC", "#053061"))(200)
}
n <- nrow(corr)
m <- ncol(corr)
min.nm <- min(n, m)
ord <- 1:min.nm
if (!order == "original") {
ord <- corrMatOrder(corr, order = order, hclust.method = hclust.method)
corr <- corr[ord, ord]
}
if (is.null(rownames(corr)))
rownames(corr) <- 1:n
if (is.null(colnames(corr)))
colnames(corr) <- 1:m
getPos.Dat <- function(mat) {
x <- matrix(1:n * m, n, m)
tmp <- mat
if (type == "upper")
tmp[row(x) > col(x)] <- Inf
if (type == "lower")
tmp[row(x) < col(x)] <- Inf
if (type == "full")
tmp <- tmp
if (!diag)
diag(tmp) <- Inf
Dat <- tmp[is.finite(tmp)]
ind <- which(is.finite(tmp), arr.ind = TRUE)
Pos <- ind
Pos[, 1] <- ind[, 2]
Pos[, 2] <- -ind[, 1] + 1 + n
return(list(Pos, Dat))
}
Pos <- getPos.Dat(corr)[[1]]
n2 <- max(Pos[, 2])
n1 <- min(Pos[, 2])
nn <- n2 - n1
newrownames <- as.character(rownames(corr)[(n + 1 - n2):(n +
1 - n1)])
m2 <- max(Pos[, 1])
m1 <- min(Pos[, 1])
mm <- m2 - m1
newcolnames <- as.character(colnames(corr)[m1:m2])
DAT <- getPos.Dat(corr)[[2]]
len.DAT <- length(DAT)
assign.color <- function(DAT) {
newcorr <- (DAT + 1)/2
newcorr[newcorr == 1] <- 1 - 0.0000000001
col.fill <- col[floor(newcorr * length(col)) + 1]
}
col.fill <- assign.color(DAT)
isFALSE = function(x) identical(x, FALSE)
isTRUE = function(x) identical(x, TRUE)
if (isFALSE(tl.pos)) {
tl.pos <- "n"
}
if (is.null(tl.pos) | isTRUE(tl.pos)) {
if (type == "full")
tl.pos <- "lt"
if (type == "lower")
tl.pos <- "ld"
if (type == "upper")
tl.pos <- "td"
}
if (isFALSE(cl.pos)) {
cl.pos <- "n"
}
if (is.null(cl.pos) | isTRUE(cl.pos)) {
if (type == "full")
cl.pos <- "r"
if (type == "lower")
cl.pos <- "b"
if (type == "upper")
cl.pos <- "r"
}
if (outline)
col.border <- "black"
if (!outline)
col.border <- col.fill
if (!add) {
par(mar = mar, bg = "white")
plot.new()
xlabwidth <- ylabwidth <- 0
for (i in 1:50) {
xlim <- c(m1 - 0.5 - xlabwidth, m2 + 0.5 + mm * cl.ratio *
(cl.pos == "r"))
ylim <- c(n1 - 0.5 - nn * cl.ratio * (cl.pos == "b"),
n2 + 0.5 + ylabwidth)
plot.window(xlim + c(-0.2, 0.2), ylim + c(-0.2, 0.2),
asp = 1, xaxs = "i", yaxs = "i")
x.tmp <- max(strwidth(newrownames, cex = tl.cex))
y.tmp <- max(strwidth(newcolnames, cex = tl.cex))
if (min(x.tmp - xlabwidth, y.tmp - ylabwidth) < 0.0001)
break
xlabwidth <- x.tmp
ylabwidth <- y.tmp
}
if (tl.pos == "n" | tl.pos == "d")
xlabwidth <- ylabwidth <- 0
if (tl.pos == "td")
ylabwidth <- 0
if (tl.pos == "ld")
xlabwidth <- 0
laboffset <- strwidth("W", cex = tl.cex) * tl.offset
xlim <- c(m1 - 0.5 - xlabwidth - laboffset, m2 + 0.5 +
mm * cl.ratio * (cl.pos == "r")) + c(-0.35, 0.15)
ylim <- c(n1 - 0.5 - nn * cl.ratio * (cl.pos == "b"),
n2 + 0.5 + ylabwidth * abs(sin(tl.srt * pi/180)) +
laboffset) + c(-0.15, 0.35)
if (.Platform$OS.type == "windows") {
windows.options(width = 7, height = 7 * diff(ylim)/diff(xlim))
}
plot.window(xlim = xlim, ylim = ylim, asp = 1, xlab = "",
ylab = "", xaxs = "i", yaxs = "i")
}
laboffset <- strwidth("W", cex = tl.cex) * tl.offset
symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
len.DAT), bg = bg, fg = bg)
if (method == "circle" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, bg = col.fill,
circles = 0.9 * abs(DAT)^0.5/2, fg = col.border)
}
if (method == "ellipse" & plotCI == "n") {
ell.dat <- function(rho, length = 99) {
k <- seq(0, 2 * pi, length = length)
x <- cos(k + acos(rho)/2)/2
y <- cos(k - acos(rho)/2)/2
return(cbind(rbind(x, y), c(NA, NA)))
}
ELL.dat <- lapply(DAT, ell.dat)
ELL.dat2 <- 0.85 * matrix(unlist(ELL.dat), ncol = 2,
byrow = TRUE)
ELL.dat2 <- ELL.dat2 + Pos[rep(1:length(DAT), each = 100),
]
polygon(ELL.dat2, border = col.border, col = col.fill)
}
if (method == "number" & plotCI == "n") {
text(Pos[, 1], Pos[, 2], font = 2, col = col.fill, labels = round((DAT -
int) * ifelse(addCoefasPercent, 100, 1)/zoom, ifelse(addCoefasPercent,
0, 2)))
}
if (method == "pie" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, circles = rep(0.5,
len.DAT) * 0.85)
pie.dat <- function(theta, length = 100) {
k <- seq(pi/2, pi/2 - theta, length = 0.5 * length *
abs(theta)/pi)
x <- c(0, cos(k)/2, 0)
y <- c(0, sin(k)/2, 0)
return(cbind(rbind(x, y), c(NA, NA)))
}
PIE.dat <- lapply(DAT * 2 * pi, pie.dat)
len.pie <- unlist(lapply(PIE.dat, length))/2
PIE.dat2 <- 0.85 * matrix(unlist(PIE.dat), ncol = 2,
byrow = TRUE)
PIE.dat2 <- PIE.dat2 + Pos[rep(1:length(DAT), len.pie),
]
polygon(PIE.dat2, border = "black", col = col.fill)
}
if (method == "shade" & plotCI == "n") {
addshade <- match.arg(addshade)
symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
len.DAT), bg = col.fill, fg = addgrid.col)
shade.dat <- function(w) {
x <- w[1]
y <- w[2]
rho <- w[3]
x1 <- x - 0.5
x2 <- x + 0.5
y1 <- y - 0.5
y2 <- y + 0.5
dat <- NA
if ((addshade == "positive" || addshade == "all") &
rho > 0) {
dat <- cbind(c(x1, x1, x), c(y, y1, y1), c(x,
x2, x2), c(y2, y2, y))
}
if ((addshade == "negative" || addshade == "all") &
rho < 0) {
dat <- cbind(c(x1, x1, x), c(y, y2, y2), c(x,
x2, x2), c(y1, y1, y))
}
return(t(dat))
}
pos_corr <- rbind(cbind(Pos, DAT))
pos_corr2 <- split(pos_corr, 1:nrow(pos_corr))
SHADE.dat <- matrix(na.omit(unlist(lapply(pos_corr2,
shade.dat))), byrow = TRUE, ncol = 4)
segments(SHADE.dat[, 1], SHADE.dat[, 2], SHADE.dat[,
3], SHADE.dat[, 4], col = shade.col, lwd = shade.lwd)
}
if (method == "square" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, squares = abs(DAT)^0.5,
bg = col.fill, fg = col.border)
}
if (method == "color" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
len.DAT), bg = col.fill, fg = col.border)
}
symbols(Pos, add = TRUE, inches = FALSE, bg = NA, squares = rep(1,
len.DAT), fg = addgrid.col)
if (plotCI != "n") {
if (is.null(lowCI.mat) || is.null(uppCI.mat))
stop("Need lowCI.mat and uppCI.mat!")
if (!order == "original") {
lowCI.mat <- lowCI.mat[ord, ord]
uppCI.mat <- uppCI.mat[ord, ord]
}
pos.lowNew <- getPos.Dat(lowCI.mat)[[1]]
lowNew <- getPos.Dat(lowCI.mat)[[2]]
pos.uppNew <- getPos.Dat(uppCI.mat)[[1]]
uppNew <- getPos.Dat(uppCI.mat)[[2]]
if (!(method == "circle" || method == "square"))
stop("method shoud be circle or square if draw confidence interval!")
k1 <- (abs(uppNew) > abs(lowNew))
bigabs <- uppNew
bigabs[which(!k1)] <- lowNew[!k1]
smallabs <- lowNew
smallabs[which(!k1)] <- uppNew[!k1]
sig <- sign(uppNew * lowNew)
if (plotCI == "circle") {
symbols(pos.uppNew[, 1], pos.uppNew[, 2], add = TRUE,
inches = FALSE, circles = 0.95 * abs(bigabs)^0.5/2,
bg = ifelse(sig > 0, col.fill, col[ceiling((bigabs +
1) * length(col)/2)]), fg = ifelse(sig > 0,
col.fill, col[ceiling((bigabs + 1) * length(col)/2)]))
symbols(pos.lowNew[, 1], pos.lowNew[, 2], add = TRUE,
inches = FALSE, circles = 0.95 * abs(smallabs)^0.5/2,
bg = ifelse(sig > 0, bg, col[ceiling((smallabs +
1) * length(col)/2)]), fg = ifelse(sig > 0,
col.fill, col[ceiling((smallabs + 1) * length(col)/2)]))
}
if (plotCI == "square") {
symbols(pos.uppNew[, 1], pos.uppNew[, 2], add = TRUE,
inches = FALSE, squares = abs(bigabs)^0.5, bg = ifelse(sig >
0, col.fill, col[ceiling((bigabs + 1) * length(col)/2)]),
fg = ifelse(sig > 0, col.fill, col[ceiling((bigabs +
1) * length(col)/2)]))
symbols(pos.lowNew[, 1], pos.lowNew[, 2], add = TRUE,
inches = FALSE, squares = abs(smallabs)^0.5,
bg = ifelse(sig > 0, bg, col[ceiling((smallabs +
1) * length(col)/2)]), fg = ifelse(sig > 0,
col.fill, col[ceiling((smallabs + 1) * length(col)/2)]))
}
if (plotCI == "rect") {
rect.width <- 0.25
rect(pos.uppNew[, 1] - rect.width, pos.uppNew[, 2] +
smallabs/2, pos.uppNew[, 1] + rect.width, pos.uppNew[,
2] + bigabs/2, col = col.fill, border = col.fill)
segments(pos.lowNew[, 1] - rect.width, pos.lowNew[,
2] + DAT/2, pos.lowNew[, 1] + rect.width, pos.lowNew[,
2] + DAT/2, col = "black", lwd = 1)
segments(pos.uppNew[, 1] - rect.width, pos.uppNew[,
2] + uppNew/2, pos.uppNew[, 1] + rect.width,
pos.uppNew[, 2] + uppNew/2, col = "black", lwd = 1)
segments(pos.lowNew[, 1] - rect.width, pos.lowNew[,
2] + lowNew/2, pos.lowNew[, 1] + rect.width,
pos.lowNew[, 2] + lowNew/2, col = "black", lwd = 1)
segments(pos.lowNew[, 1] - 0.5, pos.lowNew[, 2],
pos.lowNew[, 1] + 0.5, pos.lowNew[, 2], col = "grey70",
lty = 3)
}
}
if (!is.null(p.mat) & !insig == "n") {
if (!order == "original")
p.mat <- p.mat[ord, ord]
pos.pNew <- getPos.Dat(p.mat)[[1]]
pNew <- getPos.Dat(p.mat)[[2]]
ind.p <- which(pNew > (sig.level))
if (insig == "pch") {
points(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
pch = pch, col = pch.col, cex = pch.cex, lwd = 2)
}
if (insig == "p-value") {
text(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
round(pNew[ind.p], 2), col = pch.col)
}
if (insig == "blank") {
symbols(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
inches = FALSE, squares = rep(1, length(pos.pNew[,
1][ind.p])), fg = addgrid.col, bg = bg, add = TRUE)
}
}
if (cl.pos != "n") {
colRange <- assign.color(cl.lim2)
ind1 <- which(col == colRange[1])
ind2 <- which(col == colRange[2])
colbar <- col[ind1:ind2]
if (is.null(cl.length))
cl.length <- ifelse(length(colbar) > 20, 11, length(colbar) +
1)
labels <- seq(cl.lim[1], cl.lim[2], length = cl.length)
at <- seq(0, 1, length = length(labels))
if (cl.pos == "r") {
vertical <- TRUE
xlim <- c(m2 + 0.5 + mm * 0.02, m2 + 0.5 + mm * cl.ratio)
ylim <- c(n1 - 0.5, n2 + 0.5)
}
if (cl.pos == "b") {
vertical <- FALSE
xlim <- c(m1 - 0.5, m2 + 0.5)
ylim <- c(n1 - 0.5 - nn * cl.ratio, n1 - 0.5 - nn *
0.02)
}
colorlegend(colbar = colbar, labels = round(labels, 2),
offset = cl.offset, ratio.colbar = 0.3, cex = cl.cex,
xlim = xlim, ylim = ylim, vertical = vertical, align = cl.align.text)
}
if (tl.pos != "n") {
ylabwidth2 <- strwidth(newrownames, cex = tl.cex)
xlabwidth2 <- strwidth(newcolnames, cex = tl.cex)
pos.xlabel <- cbind(m1:m2, n2 + 0.5 + laboffset)
pos.ylabel <- cbind(m1 - 0.5, n2:n1)
if (tl.pos == "td") {
if (type != "upper")
stop("type should be \"upper\" if tl.pos is \"dt\".")
pos.ylabel <- cbind(m1:(m1 + nn) - 0.5, n2:n1)
}
if (tl.pos == "ld") {
if (type != "lower")
stop("type should be \"lower\" if tl.pos is \"ld\".")
pos.xlabel <- cbind(m1:m2, n2:(n2 - mm) + 0.5 + laboffset)
}
if (tl.pos == "d") {
pos.ylabel <- cbind(m1:(m1 + nn) - 0.5, n2:n1)
pos.ylabel <- pos.ylabel[1:min(n, m), ]
symbols(pos.ylabel[, 1] + 0.5, pos.ylabel[, 2], add = TRUE,
bg = bg, fg = addgrid.col, inches = FALSE, squares = rep(1,
length(pos.ylabel[, 1])))
text(pos.ylabel[, 1] + 0.5, pos.ylabel[, 2], newcolnames[1:min(n,
m)], col = tl.col, cex = tl.cex, ...)
}
else {
text(pos.xlabel[, 1], pos.xlabel[, 2], newcolnames,
srt = tl.srt, adj = ifelse(tl.srt == 0, c(0.5,
0), c(0, 0)), col = tl.col, cex = tl.cex, offset = tl.offset,
...)
text(pos.ylabel[, 1], pos.ylabel[, 2], newrownames,
col = tl.col, cex = tl.cex, pos = 2, offset = tl.offset,
...)
}
}
title(title, ...)
if (!is.null(addCoef.col) & (!method == "number")) {
text(Pos[, 1], Pos[, 2], col = addCoef.col, labels = round((DAT -
int) * ifelse(addCoefasPercent, 100, 1)/zoom, ifelse(addCoefasPercent,
0, 2)), cex = number.cex)
}
if (type == "full" & plotCI == "n" & !is.null(addgrid.col))
rect(m1 - 0.5, n1 - 0.5, m2 + 0.5, n2 + 0.5, border = addgrid.col)
if (!is.null(addrect) & order == "hclust" & type == "full") {
corrRect.hclust(corr, k = addrect, method = hclust.method,
col = rect.col, lwd = rect.lwd)
}
invisible(corr)
}
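A hedged usage sketch with the question's example data (it assumes corrplot is loaded so that helpers such as corrMatOrder and colorlegend are available to the copied function):
# Usage sketch for the modified function -- assumptions noted above
library(corrplot)
test <- matrix(data = rnorm(400), nrow = 20, ncol = 20)
corrplot2(cor(test), method = "color", addCoef.col = "grey",
          order = "AOE", number.cex = 0.6)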