operation between stat_summary_hex plots made in ggplot2

I have two populations, A and B, distributed spatially, each with one character Z. I want to make a hexbin plot that subtracts the proportion of the character in each hexbin between the two populations. Here is the code for two theoretical populations A and B:
library(hexbin)
library(ggplot2)

set.seed(2)
xA <- rnorm(1000)
set.seed(3)
yA <- rnorm(1000)
set.seed(4)
zA <- sample(c(1, 0), 1000, replace = TRUE, prob = c(0.2, 0.8))  # one z value per point
hbinA <- hexbin(xA, yA, xbins = 40, IDs = TRUE)
A <- data.frame(x = xA, y = yA, z = zA)

set.seed(5)
xB <- rnorm(1000)
set.seed(6)
yB <- rnorm(1000)
set.seed(7)
zB <- sample(c(1, 0), 1000, replace = TRUE, prob = c(0.4, 0.6))  # one z value per point
hbinB <- hexbin(xB, yB, xbins = 40, IDs = TRUE)
B <- data.frame(x = xB, y = yB, z = zB)

ggplot(A, aes(x, y, z = z)) +
  stat_summary_hex(fun = function(z) sum(z) / length(z), alpha = 0.8) +
  scale_fill_gradientn(colours = c("blue", "red")) +
  guides(alpha = FALSE, size = FALSE)

ggplot(B, aes(x, y, z = z)) +
  stat_summary_hex(fun = function(z) sum(z) / length(z), alpha = 0.8) +
  scale_fill_gradientn(colours = c("blue", "red")) +
  guides(alpha = FALSE, size = FALSE)
Here are the two resulting plots.
My goal is to make a third plot whose hexbins hold the difference between the two plots' values at the same coordinates, but I don't know how to start. I have done something similar with the raster package, but here I need hexbins.
Thanks a lot

You need to make sure that both plots use the exact same binning. To achieve this, I think it is best to do the binning beforehand and then plot the results with geom_hex using stat = "identity". With the variables from your code sample you can do:
## find the bounds for the complete data
xbnds <- range(c(A$x, B$x))
ybnds <- range(c(A$y, B$y))
nbins <- 30
# function to make a data.frame for geom_hex that can be used with stat_identity
makeHexData <- function(df) {
  h <- hexbin(df$x, df$y, nbins, xbnds = xbnds, ybnds = ybnds, IDs = TRUE)
  data.frame(hcell2xy(h),
             z = tapply(df$z, h@cID, FUN = function(z) sum(z) / length(z)),
             cid = h@cell)
}
Ahex <- makeHexData(A)
Bhex <- makeHexData(B)
## not all cells are present in each binning, so we need to merge by cell ID
byCell <- merge(Ahex, Bhex, by = "cid", all = TRUE)
## when calculating the difference, empty cells should count as 0
byCell$z.x[is.na(byCell$z.x)] <- 0
byCell$z.y[is.na(byCell$z.y)] <- 0
## make a "difference" data.frame
Diff <- data.frame(x = ifelse(is.na(byCell$x.x), byCell$x.y, byCell$x.x),
                   y = ifelse(is.na(byCell$y.x), byCell$y.y, byCell$y.x),
                   z = byCell$z.x - byCell$z.y)
## plot the results
ggplot(Ahex) +
  geom_hex(aes(x = x, y = y, fill = z),
           stat = "identity", alpha = 0.8) +
  scale_fill_gradientn(colours = c("blue", "red")) +
  guides(alpha = FALSE, size = FALSE)

ggplot(Bhex) +
  geom_hex(aes(x = x, y = y, fill = z),
           stat = "identity", alpha = 0.8) +
  scale_fill_gradientn(colours = c("blue", "red")) +
  guides(alpha = FALSE, size = FALSE)

ggplot(Diff) +
  geom_hex(aes(x = x, y = y, fill = z),
           stat = "identity", alpha = 0.8) +
  scale_fill_gradientn(colours = c("blue", "red")) +
  guides(alpha = FALSE, size = FALSE)
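One optional tweak, not part of the answer above: since the difference values are centred on zero, a diverging fill scale can make the sign of the difference easier to read than the blue-to-red gradient. A minimal sketch:

## optional: diverging fill scale for the difference plot, centred at 0
ggplot(Diff) +
  geom_hex(aes(x = x, y = y, fill = z),
           stat = "identity", alpha = 0.8) +
  scale_fill_gradient2(low = "blue", mid = "white", high = "red", midpoint = 0) +
  guides(alpha = FALSE, size = FALSE)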

Related

How to pass break values to stat_contour by facet or group

I am trying to use the ks library to calculate the 95% home range for groups within a data set. The problem is that the "break" values which define the cut-off for the 95% contours differ between groups. So far I have been able to get my plots, but I have to manually add the break values for each group/level, and I would really like a solution where the break values are pulled in automatically when I create the figures in ggplot.
require(ks)
require(dplyr)
require(ggplot2)

# define the ks function to pass to a grouped_df
ksFUN = function(data){
  H = Hpi(data[, c("x", "y")], binned = TRUE) * 1
  fhata = kde(data[, c("x", "y")], H = H, compute.cont = TRUE,
              xmin = c(minX, minY), xmax = c(maxX, maxY))
  res95 = data.frame(HR = contourSizes(fhata, cont = 95, approx = TRUE))
  dimnames(fhata[["estimate"]]) = list(fhata[["eval.points"]][[1]],
                                       fhata[["eval.points"]][[2]])
  dat = reshape2::melt(fhata[["estimate"]])
  dat$breaks50 = fhata[["cont"]]["50%"]
  # note: in ks, the 95% contour corresponds to the "5%" element of fhata$cont
  dat$breaks95 = fhata[["cont"]]["5%"]
  return(dat)
}
set.seed(100)
# create some data
df1 = data.frame(x = rnorm(100, 0, 5),
                 y = rnorm(100, 0, 5),
                 Group = "Test1")
df2 = data.frame(x = rnorm(100, 10, 5),
                 y = rnorm(100, 10, 5),
                 Group = "Test2")
df = rbind(df1, df2)

# set the minimum and maximum x and y values outside
# of ksFUN to keep the data on the same scale
minX = min(df$x, na.rm = TRUE); maxX = max(df$x, na.rm = TRUE)
minY = min(df$y, na.rm = TRUE); maxY = max(df$y, na.rm = TRUE)

xx = df %>%
  group_by(Group) %>%
  do(as.data.frame(ksFUN(.)))

# extract the break values for the 95% contour (home range) and 50% (core area)
breaks = xx %>%
  group_by(Group) %>%
  summarize(breaks95 = mean(breaks95),
            breaks50 = mean(breaks50))
breaks

# the only way I have been able to add the breaks is to add them manually
ggplot(data = xx, aes(x = Var1, y = Var2, fill = Group)) +
  geom_point(data = df, aes(x = x, y = y, col = Group)) +
  stat_contour(data = xx[xx$Group == "Test1", ], aes(z = value),
               breaks = 0.000587, alpha = 0.3, geom = "polygon") +
  stat_contour(data = xx[xx$Group == "Test2", ], aes(z = value),
               breaks = 0.000527, alpha = 0.3, geom = "polygon")
I would really like to find a solution where I don't have to explicitly pass the break values to the stat_contour functions.

Is there a problem with using the break values stored in the breaks data frame? For example:
# base plot
pl <- ggplot(data = xx, aes(x = Var1, y = Var2, fill = Group)) +
  geom_point(data = df, aes(x = x, y = y, col = Group))

groups <- unique(xx$Group)

# loop over the groups and add a contour layer for each,
# pulling that group's 95% break value from the 'breaks' data frame
for(i in groups){
  pl <- pl + stat_contour(data = xx[xx$Group == i, ], aes(z = value),
                          breaks = breaks$breaks95[breaks$Group == i],
                          alpha = 0.3, geom = "polygon")
}
pl
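If you prefer not to grow the plot in a loop, the same idea can be written by building the list of layers up front; ggplot2 accepts a list of layers added with +. A minimal sketch based on the loop above (using the breaks95 column, which I take to be the break you want):

# sketch: one stat_contour layer per group, added as a list of layers
pl <- ggplot(data = xx, aes(x = Var1, y = Var2, fill = Group)) +
  geom_point(data = df, aes(x = x, y = y, col = Group)) +
  lapply(unique(xx$Group), function(g) {
    stat_contour(data = xx[xx$Group == g, ], aes(z = value),
                 breaks = breaks$breaks95[breaks$Group == g],
                 alpha = 0.3, geom = "polygon")
  })
pl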
I get some weird plots at the edges, especially when I remove the breaks argument from stat_contour, which leads me to think there might be a bug in ksFUN.

ggplot2: display blocks of nested split violins

I have the following dataset:
df <- data.frame(dens = rnorm(5000),
                 split = as.factor(sample(1:2, 5000, replace = TRUE)),
                 method = as.factor(sample(c("A", "B"), 5000, replace = TRUE)),
                 counts = sample(c(1, 10, 100, 1000, 10000), 5000, replace = TRUE))
What I want to do is draw split violin plots for splits 1 and 2 within methods A and B for each count (the counts would be on a log scale, but that is not important for this example). There are four groups for each count, but with a nested structure.
So, I can do the following:
df$key <- factor(paste(df$split, df$method))
and then:
library(ggplot2)
ggplot(df, aes(x = factor(counts), y = dens, fill = split)) +
  geom_violin(aes(fill = key), scale = "width", draw_quantiles = c(0.25, 0.5, 0.75)) +
  scale_fill_manual(values = cbPalette) +  # cbPalette: a user-defined colour palette (not shown)
  theme_bw()
which gives me the following plot:
But what I really want is for the light blue and the dark blue to be the two halves of one split violin plot, and the light green and the dark green to be the two halves of another, with these plots bunched together. I would also like the different counts to be more separated from each other, but I feel I can figure that out.
Note that this question is different from the one I listed, and from Split violin plot with ggplot2, because here we are bunching two different levels of nested split violin plots for each value of counts.
I was trying to follow the approach in that linked answer, but I cannot tell how to add such a nested grouping to the code there, so I am looking for some advice.
Here is what I have tried:
GeomSplitViolin <- ggproto("GeomSplitViolin", GeomViolin,
  draw_group = function(self, data, ..., draw_quantiles = NULL){
    # By @YAK: https://stackoverflow.com/questions/35717353/split-violin-plot-with-ggplot2
    data <- transform(data,
                      xminv = x - violinwidth * (x - xmin),
                      xmaxv = x + violinwidth * (xmax - x))
    grp <- data[1, "group"]
    newdata <- plyr::arrange(transform(data, x = if (grp %% 2 == 1) xminv else xmaxv),
                             if (grp %% 2 == 1) y else -y)
    newdata <- rbind(newdata[1, ], newdata, newdata[nrow(newdata), ], newdata[1, ])
    newdata[c(1, nrow(newdata) - 1, nrow(newdata)), "x"] <- round(newdata[1, "x"])
    if (length(draw_quantiles) > 0 & !scales::zero_range(range(data$y))) {
      stopifnot(all(draw_quantiles >= 0), all(draw_quantiles <= 1))
      quantiles <- create_quantile_segment_frame(data, draw_quantiles, split = TRUE, grp = grp)
      aesthetics <- data[rep(1, nrow(quantiles)), setdiff(names(data), c("x", "y")), drop = FALSE]
      aesthetics$alpha <- rep(1, nrow(quantiles))
      both <- cbind(quantiles, aesthetics)
      quantile_grob <- GeomPath$draw_panel(both, ...)
      ggplot2:::ggname("geom_split_violin",
                       grid::grobTree(GeomPolygon$draw_panel(newdata, ...), quantile_grob))
    } else {
      ggplot2:::ggname("geom_split_violin", GeomPolygon$draw_panel(newdata, ...))
    }
  }
)
create_quantile_segment_frame <- function(data, draw_quantiles, split = FALSE, grp = NULL) {
  dens <- cumsum(data$density) / sum(data$density)
  ecdf <- stats::approxfun(dens, data$y)
  ys <- ecdf(draw_quantiles)
  violin.xminvs <- (stats::approxfun(data$y, data$xminv))(ys)
  violin.xmaxvs <- (stats::approxfun(data$y, data$xmaxv))(ys)
  violin.xs <- (stats::approxfun(data$y, data$x))(ys)
  if (grp %% 2 == 0) {
    data.frame(x = ggplot2:::interleave(violin.xs, violin.xmaxvs),
               y = rep(ys, each = 2), group = rep(ys, each = 2))
  } else {
    data.frame(x = ggplot2:::interleave(violin.xminvs, violin.xs),
               y = rep(ys, each = 2), group = rep(ys, each = 2))
  }
}
geom_split_violin <- function(mapping = NULL, data = NULL, stat = "ydensity",
                              position = "identity", ..., draw_quantiles = NULL,
                              trim = TRUE, scale = "area", na.rm = FALSE,
                              show.legend = NA, inherit.aes = TRUE) {
  layer(data = data, mapping = mapping, stat = stat, geom = GeomSplitViolin,
        position = position, show.legend = show.legend, inherit.aes = inherit.aes,
        params = list(trim = trim, scale = scale, draw_quantiles = draw_quantiles,
                      na.rm = na.rm, ...))
}
library(ggplot2)
ggplot(df, aes(x = factor(counts), y = dens, fill = interaction(split, method))) +
  geom_split_violin(draw_quantiles = c(0.25, 0.5, 0.75)) +
  scale_fill_manual(values = RColorBrewer::brewer.pal(name = "Paired", n = 4)) +
  theme_light() +
  theme(legend.position = "bottom")
And here is what I get:
As can be seen, the green violins are drawn on top of the blue ones. How do I get around this? Thanks!
EDIT: Following Axeman's suggestion, I am almost there:
levels(df$split) <- factor(0:3)
library(ggplot2)
ggplot(df, aes(x = interaction(split, counts), y = dens, fill = key)) +
  geom_split_violin(draw_quantiles = c(0.25, 0.5, 0.75)) +
  scale_fill_manual(values = RColorBrewer::brewer.pal(name = "Paired", n = 4)) +
  theme_light() +
  theme(legend.position = "bottom") +
  scale_x_discrete(interaction(df$split, df$counts)[-length(interaction(df$split, df$counts))],
                   drop = FALSE)
So, almost there!
I would still like two fixes: removing the white space arising from the last interaction level of split and counts, and having the x-axis show only the counts for each bunch.
I wonder if these should be separate questions on Stack Overflow.
Almost there!
library(ggplot2)
ggplot(df, aes(x = interaction(split, counts), y = dens, fill = key)) +
  geom_split_violin(draw_quantiles = c(0.25, 0.5, 0.75)) +
  scale_fill_manual(values = RColorBrewer::brewer.pal(name = "Paired", n = 4)) +
  theme_light() +
  theme(legend.position = "bottom") +
  scale_x_discrete(limits = levels(interaction(df$split, df$counts))[-length(levels(interaction(df$split, df$counts)))],
                   drop = FALSE)
This yields:
I still need to place the value of counts on the x-axis, in between the two violins of each group.
I think this question has become too long and its basic parts have been answered. I have put up a new question on how to change the discrete scale; hopefully someone will know! Anyway, here is the answer to this question (thanks, Axe!), taken from the edited version of my question:
library(ggplot2)
df <- data.frame(dens = rnorm(5000),
                 split = factor(sample(1:2, 5000, replace = TRUE)),
                 method = factor(sample(c("A", "B"), 5000, replace = TRUE)),
                 counts = factor(sample(c(1, 10, 100, 1000, 10000), 5000, replace = TRUE)))
df$key <- factor(paste(df$split, df$method))
levels(df$split) <- factor(0:2)

ggplot(df, aes(x = interaction(split, counts), y = dens, fill = key)) +
  geom_split_violin(draw_quantiles = c(0.25, 0.5, 0.75)) +
  scale_fill_manual(values = RColorBrewer::brewer.pal(name = "Paired", n = 4)) +
  theme_light() +
  theme(legend.position = "bottom") +
  scale_x_discrete(
    limits = levels(interaction(df$split, df$counts))[-length(levels(interaction(df$split, df$counts)))],
    drop = FALSE,
    name = "Counts"
  )

Generating multiple geom_smooth lines of data samples

I am attempting to build a new geom that takes samples of points from a dataset by group and fits a local regression through each subset. This would generate multiple local regression lines, each based on a sample of the full dataset, in the end producing something akin to this:
However, I keep getting errors with the functions I've built below (reprex included). Any assistance is appreciated. Thank you!
library(ggplot2)
library(dplyr)
geom_mline <- function(mapping = NULL, data = NULL, stat = "mline",
                       position = "identity", show.legend = NA,
                       inherit.aes = TRUE, na.rm = TRUE,
                       SPAN = .9, N_size = 50, N_LOESS = 50, ...) {
  layer(
    geom = geomMline,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(SPAN = SPAN,
                  N_size = N_size,
                  N_LOESS = N_LOESS,
                  ...)
  )
}

geomMline <- ggproto("geomMline", GeomLine,
                     required_aes = c("x", "y"),
                     default_aes = aes(colour = "black", size = 0.5, linetype = 1, alpha = NA)
)
stat_mline <- function(mapping = NULL, data = NULL, geom = "line",
                       position = "identity", show.legend = NA, inherit.aes = TRUE,
                       SPAN = .9, N_size = 50, N_LOESS = 50, ...) {
  layer(
    stat = StatMline,
    data = data,
    mapping = mapping,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(SPAN = SPAN,
                  N_size = N_size,
                  N_LOESS = N_LOESS,
                  ...)
  )
}
StatMline <- ggproto("StatMline", Stat,
  required_aes = c("x", "y"),
  compute_group = function(self, data, scales, params,
                           SPAN = .9, N_size = 50, N_LOESS = 50) {
    tf <- tempfile(fileext = ".png")
    png(tf)
    plot.new()
    colnames(data) <- c("x", "variable", "y")
    # one likely source of the reported errors: this grid has 50 rows, while
    # predict() below produces 500 values, so the cbind() in the loop cannot match them
    LOESS_DF <- data.frame(y = seq(min(data$x),
                                   max(data$x),
                                   length.out = 50))
    for (i in 1:N_LOESS) {
      # sample N_size points
      df_sample <- sample_n(data, N_size)
      # fit a loess
      xx <- df_sample$x
      yy <- df_sample$y
      tp_est <- loess(yy ~ xx, span = SPAN)
      # predict across the range of x using the loess model
      loess_vec <- data.frame(
        predict(tp_est, newdata =
                  data.frame(xx = seq(min(data$x), max(data$x), length.out = 500))))
      colnames(loess_vec) <- as.character(i)
      # repeat x times
      LOESS_DF <- cbind(LOESS_DF, loess_vec)
      # str(LOESS_DF)
    }
    invisible(dev.off())
    unlink(tf)
    data.frame(reshape2::melt(LOESS_DF, id = "y"))
  }
)
# dummy data
library(reshape2)
x <- seq(1, 1000, 1)
y1 <- rnorm(n = 1000, mean = x * 2^1.1, sd = 200)
y2 <- rnorm(n = 1000, mean = x * 1, sd = 287.3)
y3 <- rnorm(n = 1000, mean = x * 1.1, sd = 100.1)
data <- data.frame(x, y1, y2, y3)
data <- melt(data, id.vars = "x")
str(data)

ggplot(data, aes(x, value, group = variable, color = variable)) +
  geom_point()
ggplot(data = data, aes(x = x, y = value, group = variable, color = variable)) +
  # geom_point(color = "black") +
  # geom_smooth(se = FALSE, linetype = "dashed", size = 0.5) +
  # stat_mline(SPAN = .2, N_size = 50, N_LOESS = 5)
  geom_mline(SPAN = .2, N_size = 50, N_LOESS = 5)

# data <- subset(data, variable == "y2")
You could use the existing geom_smooth geom and use lapply to generate geom_smooth calls from multiple random samples from the original data frame. For example:
# Fake data
set.seed(2)
dat = data.frame(x = runif(100, 0, 10))
dat$y = 2 * dat$x - 0.5 * dat$x^2 - 5 + rnorm(100, 0, 5)

ggplot(dat, aes(x, y)) +
  geom_point() +
  lapply(1:10, function(i) {
    geom_smooth(data = dat[sample(1:nrow(dat), 20), ], se = FALSE)
  })
Or, keeping it all in the tidyverse:
library(tidyverse)
ggplot(dat, aes(x, y)) +
  geom_point() +
  map(1:10, ~ geom_smooth(data = dat[sample(1:nrow(dat), 20), ], se = FALSE))
Here's a way to plot the quantiles within ggplot. I'm not sure it's possible to get stat_quantile to plot a ribbon. To get that, you might have to calculate the quantile regression outside of ggplot and use geom_ribbon to add the values (see the sketch after the code below).
ggplot(dat, aes(x, y)) +
  geom_point() +
  geom_quantile(quantiles = c(0.1, 0.5, 0.9), formula = y ~ poly(x, 2),
                aes(color = factor(..quantile..), size = factor(..quantile..))) +
  scale_color_manual(values = c("red", "blue", "red")) +
  scale_size_manual(values = c(1, 2, 1)) +
  labs(colour = "Quantile") +
  guides(colour = guide_legend(reverse = TRUE), size = FALSE) +
  theme_classic()
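For the geom_ribbon idea mentioned above, here is a minimal sketch of one way it might look, assuming the quantreg package (which stat_quantile relies on) is acceptable for fitting the 0.1 and 0.9 quantile regressions outside ggplot:

library(quantreg)

# fit the outer quantiles outside ggplot, then draw the band with geom_ribbon
newx   <- data.frame(x = seq(min(dat$x), max(dat$x), length.out = 200))
fit_lo <- rq(y ~ poly(x, 2), tau = 0.1, data = dat)
fit_hi <- rq(y ~ poly(x, 2), tau = 0.9, data = dat)
band   <- data.frame(x    = newx$x,
                     ymin = predict(fit_lo, newdata = newx),
                     ymax = predict(fit_hi, newdata = newx))

ggplot(dat, aes(x, y)) +
  geom_ribbon(data = band, aes(x = x, ymin = ymin, ymax = ymax),
              inherit.aes = FALSE, fill = "red", alpha = 0.2) +
  geom_point() +
  geom_quantile(quantiles = 0.5, formula = y ~ poly(x, 2), colour = "blue", size = 1) +
  theme_classic()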

Filling alphahull with ggplot2

I used the alphahull package to delineate dots on a map.
I plot the contours with geom_segment.
My question is: how do I fill the outline given by the segments with a colour?
Here is a reproducible example:
set.seed(2)
dat <- data.frame(x = rnorm(20, 10, 5), y = rnorm(20, 20, 5), z = c(rep(1, 6), rep(2, 4)))
library(ggplot2)
library(alphahull)
alpha <- 100
alphashape1 <- ashape(dat[which(dat$z==1), c("x", "y")], alpha = alpha)
alphashape2 <- ashape(dat[which(dat$z==2), c("x", "y")], alpha = alpha)
map <- ggplot(dat, aes(x = x, y = y)) +
  geom_point(data = dat, aes(x = x, y = y, colour = as.factor(dat$z))) +
  geom_segment(data = data.frame(alphashape1$edges),
               aes(x = x1, y = y1, xend = x2, yend = y2, colour = levels(as.factor(dat$z))[1])) +
  geom_segment(data = data.frame(alphashape2$edges),
               aes(x = x1, y = y1, xend = x2, yend = y2, colour = levels(as.factor(dat$z))[2]))
map
I believe this works w/o the need for graph ops:
fortify.ashape <- function(ashape_res) {
  xdf <- data.frame(ashape_res$edges)
  # stack the two endpoints of every edge into one long data frame
  xdf <- do.call(
    rbind,
    lapply(1:nrow(xdf), function(i) {
      rbind(
        data.frame(x = xdf$x1[i], y = xdf$y1[i]),
        data.frame(x = xdf$x2[i], y = xdf$y2[i])
      )
    })
  )
  # order the points by angle around the centre so geom_polygon can close the shape
  xdf <- xdf[order(-1 * atan2(
    xdf$y - mean(range(xdf$y)),
    xdf$x - mean(range(xdf$x)))), c("x", "y")]
  xdf <- rbind.data.frame(xdf[nrow(xdf), ], xdf[1:(nrow(xdf) - 1), ])
  xdf
}
alphashape1 <- ashape(dat[which(dat$z == 1), c("x", "y")], alpha = 15)
alphashape2 <- ashape(dat[which(dat$z == 2), c("x", "y")], alpha = 15)

ggplot() +
  geom_point(data = dat, aes(x = x, y = y, colour = as.factor(dat$z))) +
  geom_polygon(data = alphashape1, aes(x, y), fill = "red", alpha = 2/3) +
  geom_polygon(data = alphashape2, aes(x, y), fill = "blue", alpha = 2/3)
The problem is that the ashape function only returns the segments, in no particular order.
The only way I found to reconstruct the order was to use the node information to form a graph and then find the shortest path along that graph.
A detailed example is here: https://rpubs.com/geospacedman/alphasimple - the code needs wrapping into a single function, which should be fairly easy to do. Once you have the order sorted out, geom_polygon will draw the filled shape in ggplot2.
Based on Spacedman's answer, I ordered the two sets of points separately and came up with the solution below.
It could be tidied up with a function that does this for each group automatically (a sketch of such a function follows the code).
set.seed(2)
dat <- data.frame(x = rnorm(20, 10, 5), y = rnorm(20, 20, 5), z = c(rep(1, 6), rep(2, 4)))

library(ggplot2)
library(alphahull)

alpha <- 100
alphashape1 <- ashape(dat[which(dat$z == 1), c("x", "y")], alpha = alpha)
alphashape2 <- ashape(dat[which(dat$z == 2), c("x", "y")], alpha = alpha)

map <- ggplot(dat, aes(x = x, y = y)) +
  geom_point(data = dat, aes(x = x, y = y, colour = as.factor(dat$z))) +
  geom_segment(data = data.frame(alphashape1$edges),
               aes(x = x1, y = y1, xend = x2, yend = y2, colour = levels(as.factor(dat$z))[1])) +
  geom_segment(data = data.frame(alphashape2$edges),
               aes(x = x1, y = y1, xend = x2, yend = y2, colour = levels(as.factor(dat$z))[2]))
map
library(igraph)  # for graph.edgelist(), E(), degree(), get.shortest.paths(), V()

alpha <- 15  # alpha parameter of ashape(), not a plot transparency

# First contour
alphashape1 <- ashape(dat[which(dat$z == 1), c("x", "y")], alpha = alpha)
alphashape1_ind <- alphashape1$edges[, c("ind1", "ind2")]
class(alphashape1_ind) <- "character"
alphashape1_graph <- graph.edgelist(alphashape1_ind, directed = FALSE)
cut_graph1 <- alphashape1_graph - E(alphashape1_graph)[1]  # cut the first edge
ends1 <- names(which(degree(cut_graph1) == 1))             # get the two nodes with degree 1
path1 <- get.shortest.paths(cut_graph1, ends1[1], ends1[2])$vpath[[1]]
path_nodes1 <- as.numeric(V(alphashape1_graph)[path1]$name)
# Second contour
alphashape2 <- ashape(dat[which(dat$z == 2), c("x", "y")], alpha = alpha)
alphashape2_ind <- alphashape2$edges[, c("ind1", "ind2")]
class(alphashape2_ind) <- "character"
alphashape2_graph <- graph.edgelist(alphashape2_ind, directed = FALSE)
cut_graph2 <- alphashape2_graph - E(alphashape2_graph)[1]  # cut the first edge
ends2 <- names(which(degree(cut_graph2) == 1))             # get the two nodes with degree 1
path2 <- get.shortest.paths(cut_graph2, ends2[1], ends2[2])$vpath[[1]]
path_nodes2 <- as.numeric(V(alphashape2_graph)[path2]$name)
# Update the previous plot (see question)
map +
  geom_polygon(data = dat[which(dat$z == 1), c("x", "y")][path_nodes1, ],
               aes(x = x, y = y), colour = "red", fill = "red", size = 0.5, alpha = 0.3) +
  geom_polygon(data = dat[which(dat$z == 2), c("x", "y")][path_nodes2, ],
               aes(x = x, y = y), colour = "blue", fill = "blue", size = 0.5, alpha = 0.3)
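As noted above, the two near-identical blocks could be wrapped into a single helper that orders one group's points along its alpha-shape boundary. A minimal sketch that just repackages the igraph steps shown here (the function name ashape_poly is my own, not from the original answer, and it assumes library(igraph) and the setup above):

# sketch: order one group's points along its alpha-shape boundary
ashape_poly <- function(xy, alpha) {
  shp <- ashape(xy, alpha = alpha)
  ind <- shp$edges[, c("ind1", "ind2")]
  class(ind) <- "character"
  g <- graph.edgelist(ind, directed = FALSE)
  cut_g <- g - E(g)[1]                          # cut the first edge
  ends <- names(which(degree(cut_g) == 1))      # the two loose ends
  path <- get.shortest.paths(cut_g, ends[1], ends[2])$vpath[[1]]
  xy[as.numeric(V(g)[path]$name), ]             # points in boundary order
}

map +
  geom_polygon(data = ashape_poly(dat[dat$z == 1, c("x", "y")], alpha = 15),
               aes(x = x, y = y), colour = "red", fill = "red", alpha = 0.3) +
  geom_polygon(data = ashape_poly(dat[dat$z == 2, c("x", "y")], alpha = 15),
               aes(x = x, y = y), colour = "blue", fill = "blue", alpha = 0.3)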

R ggplot2 -- Set z axis limit in stat_contour plot

In ggplot2 x and y axis limits can be specified using a shorthand approach (xlim(0, 1)) or a more structured approach (scale_x_continuous(limits = c(0, 1))). How do I do this for the z axis in a contour plot?
Reproducible example:
library(ggplot2)
library(akima)

x <- runif(40)
y <- runif(40)
z <- runif(40, min = 0.5, max = 1)

fld <- interp(x = x,
              y = y,
              z = z,
              xo = seq(min(x), max(x), length = 40),
              duplicate = 'mean')
gdat <- interp2xyz(fld, data.frame = TRUE)

ggplot(gdat, aes(x = x, y = y, z = z)) +
  geom_tile(aes(fill = z)) +
  stat_contour(aes(fill = ..level..), geom = 'polygon', binwidth = 0.005) +
  geom_contour(colour = 'white', alpha = 0.5) +
  scale_fill_gradient(low = 'white', high = 'red') +
  theme_bw()
In this example, I artificially constrained the z values to the range 0.5 to 1. However, I would like the white end of the colour gradient to correspond to 0 instead of 0.5. Any suggestions on how to do this?
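Since the fill scale plays the role of the z axis here, one option worth trying is the limits argument of scale_fill_gradient, the fill-scale analogue of scale_x_continuous(limits = ...). A minimal sketch applied to the example above (the c(0, 1) range is an assumption based on the question):

# sketch: anchor the fill scale at 0 so white corresponds to z = 0
ggplot(gdat, aes(x = x, y = y, z = z)) +
  geom_tile(aes(fill = z)) +
  geom_contour(colour = 'white', alpha = 0.5) +
  scale_fill_gradient(low = 'white', high = 'red', limits = c(0, 1)) +
  theme_bw()

Any other layer mapped to fill, such as the stat_contour(aes(fill = ..level..)) polygons in the original plot, shares the same fill scale, so its colours would be rescaled to the same 0 to 1 range.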
