I am looking to generate a ternary plot with binned polygons (either triangle or hex, preferably in a ggplot framework) where the color of the polygon is a binned mean or median of selected values.
This script gets very close, but the triangle cell color represents the number of observations rather than a mean value of the observations contained within the triangle cell.
So rather than solely providing X, Y, and Z, I would provide a fourth fill/value variable from which binned means or medians are calculated and represented as a color on a gradient.
Akin to the image below, though in a ternary framework with an additional axis.
[Image: stat_summary_hex() plot with color as binned mean value]
I appreciate the help. Thank you.
Dummy data to begin with:
#load libraries
library(MASS) # provides mvrnorm() for the example data
devtools::install_git('https://bitbucket.org/nicholasehamilton/ggtern')
library(ggtern)
library(ggplot2)
# example data
sig <- matrix(c(3,0,0,2),2,2)
data <- data.frame(mvrnorm(n=10000, rep(2, 2), sig))
data$X1 <- data$X1/max(data$X1)
data$X2 <- data$X2/max(data$X2)
data$X1[which(data$X1<0)] <- runif(length(data$X1[which(data$X1<0)]))
data$X2[which(data$X2<0)] <- runif(length(data$X2[which(data$X2<0)]))
data$X3 <- with(data, 1-X1-X2)
data <- data[data$X3 >= 0,]
data$X4 <- rnorm(dim(data)[1])
data <- data.frame(X = data$X1, Y = data$X2, Z = data$X3, fill_variable = data$X4)
str(data)
# simple ternary plot where color of point is the fill variable value
ggtern(data,aes(X,Y,Z, color = fill_variable))+geom_point()
# 2D example (not ternary). Note that in stat_summary_hex, z is the fill variable, not an additional axis as in ggtern
ggplot(data,aes(X,Y))+stat_summary_hex(aes(z = fill_variable))
This code isn't cleaned up, but it's a good jumping-off point. Credit for the original goes to the OP referenced in the first question.
I made some minor adjustments to the count_bin function so that, instead of computing bin counts, it computes bin medians. Use at your own risk and please point out any bugs. In my implementation, empty (NA) bins are reported as 0.
Example:
Function for binned median (pardon the name, just saves time):
library(plyr) # ddply(), join(), and .() are used by heatmap3d() below
count_bin <- function(data, minT, maxT, minR, maxR, minL, maxL) {
# keep only the observations that fall inside this triangular bin
ret <- data
ret <- with(ret, ret[minT <= X1 & X1 < maxT,])
ret <- with(ret, ret[minL <= X2 & X2 < maxL,])
ret <- with(ret, ret[minR <= X3 & X3 < maxR,])
# median of the fill variable; empty (NA) bins are reported as 0
if(is.na(median(ret$VAR))) {
ret <- 0
} else {
ret <- median(ret$VAR)
}
ret
}
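For intuition, here is a direct call on a data frame with the X1/X2/X3/VAR columns the function expects (the bounds below are arbitrary illustration values; see the dummy example at the end of this answer for such a data frame):
# median of VAR over observations whose (X1, X2, X3) fall in the given ranges;
# returns 0 when the bin is empty
count_bin(data, minT = 0, maxT = 0.2, minR = 0, maxR = 0.2, minL = 0.6, maxL = 1)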
Modified heatmap function:
heatmap3d <- function(data, inc, logscale=FALSE, text=FALSE, plot_corner=TRUE) {
# When plot_corner is FALSE, corner_cutoff determines where to stop plotting
corner_cutoff = 1
# When plot_corner is FALSE, corner_numbers toggles display of observations in the corners
# This only has an effect when text==FALSE
corner_numbers = TRUE
count <- 1
points <- data.frame()
for (z in seq(0,1,inc)) {
x <- 1- z
y <- 0
while (x>0) {
points <- rbind(points, c(count, x, y, z))
x <- round(x - inc, digits=2)
y <- round(y + inc, digits=2)
count <- count + 1
}
points <- rbind(points, c(count, x, y, z))
count <- count + 1
}
colnames(points) = c("IDPoint","T","L","R")
#str(points)
#str(count)
# base <- ggtern(data=points,aes(L,T,R)) +
# theme_bw() + theme_hidetitles() + theme_hidearrows() +
# geom_point(shape=21,size=10,color="blue",fill="white") +
# geom_text(aes(label=IDPoint),color="blue")
# print(base)
polygons <- data.frame()
c <- 1
# Normal triangles
for (p in points$IDPoint) {
if (is.element(p, points$IDPoint[points$T==0])) {
next
} else {
pL <- points$L[points$IDPoint==p]
pT <- points$T[points$IDPoint==p]
pR <- points$R[points$IDPoint==p]
polygons <- rbind(polygons,
c(c,p),
c(c,points$IDPoint[abs(points$L-pL) < inc/2 & abs(points$R-pR-inc) < inc/2]),
c(c,points$IDPoint[abs(points$L-pL-inc) < inc/2 & abs(points$R-pR) < inc/2]))
c <- c + 1
}
}
#str(c)
# Upside down triangles
for (p in points$IDPoint) {
if (!is.element(p, points$IDPoint[points$T==0])) {
if (!is.element(p, points$IDPoint[points$L==0])) {
pL <- points$L[points$IDPoint==p]
pT <- points$T[points$IDPoint==p]
pR <- points$R[points$IDPoint==p]
polygons <- rbind(polygons,
c(c,p),
c(c,points$IDPoint[abs(points$T-pT) < inc/2 & abs(points$R-pR-inc) < inc/2]),
c(c,points$IDPoint[abs(points$L-pL) < inc/2 & abs(points$R-pR-inc) < inc/2]))
c <- c + 1
}
}
}
#str(c)
# IMPORTANT FOR CORRECT ORDERING.
polygons$PointOrder <- 1:nrow(polygons)
colnames(polygons) = c("IDLabel","IDPoint","PointOrder")
df.tr <- merge(polygons,points)
Labs = ddply(df.tr,"IDLabel",function(x){c(c(mean(x$T),mean(x$L),mean(x$R)))})
colnames(Labs) = c("Label","T","L","R")
#str(Labs)
#triangles <- ggtern(data=df.tr,aes(L,T,R)) +
# geom_polygon(aes(group=IDLabel),color="black",alpha=0.25) +
# geom_text(data=Labs,aes(label=Label),size=4,color="black") +
# theme_bw()
# print(triangles)
bins <- ddply(df.tr, .(IDLabel), summarize,
maxT=max(T),
maxL=max(L),
maxR=max(R),
minT=min(T),
minL=min(L),
minR=min(R))
#str(bins)
count <- ddply(bins, .(IDLabel), summarize,
N=count_bin(data, minT, maxT, minR, maxR, minL, maxL)
#N=mean(data)
)
df <- join(df.tr, count, by="IDLabel")
str(count)
Labs = ddply(df,.(IDLabel,N),function(x){c(c(mean(x$T),mean(x$L),mean(x$R)))})
colnames(Labs) = c("Label","N","T","L","R")
if (plot_corner==FALSE){
corner <- ddply(df, .(IDPoint, IDLabel), summarize, maxperc=max(T,L,R))
corner <- corner$IDLabel[corner$maxperc>=corner_cutoff]
df$N[is.element(df$IDLabel, corner)] <- 0
if (text==FALSE & corner_numbers==TRUE) {
Labs$N[!is.element(Labs$Label, corner)] <- ""
text=TRUE
}
}
heat <- ggtern(data=df,aes(L,T,R)) +
geom_polygon(aes(fill=N,group=IDLabel),color="black",alpha=1, size = 0.1,show.legend = F)
if (logscale == TRUE) {
# placeholder colours: the original referenced an undefined `palette` vector here
heat <- heat + scale_fill_gradient(name="Observations", trans = "log",
low="yellow", high="red")
} else {
heat <- heat + scale_fill_distiller(name="Median Value",
palette = "Spectral")
}
heat <<- heat +
Tlab("x") +
Rlab("y") +
Llab("z") +
theme_bw() +
theme(axis.tern.arrowsep=unit(0.02,"npc"), # arrow separation from the ticks
axis.tern.arrowstart=0.25,axis.tern.arrowfinish=0.75,
axis.tern.text=element_text(size=12),
axis.tern.arrow.text.T=element_text(vjust=-1),validate = F,
axis.tern.arrow.text.R=element_text(vjust=2),
axis.tern.arrow.text.L=element_text(vjust=-1),
#axis.tern.arrow.text=element_text(size=12),
axis.tern.title=element_text(size=15),
axis.tern.text=element_blank(),
axis.tern.arrow.text=element_blank())
if (text==FALSE) {
print(heat)
} else {
print(heat + geom_text(data=Labs,aes(label=N),size=3,color="white"))
}
}
Dummy example:
# dummy example
sig <- matrix(c(3,0,0,2),2,2) # 2x2 covariance; a 3x3 matrix would not match the 2-D mean vector
data <- data.frame(mvrnorm(n=10000, rep(2, 2), sig))
data$X1[which(data$X1<0)] <- runif(length(data$X1[which(data$X1<0)]))
data$X2[which(data$X2<0)] <- runif(length(data$X2[which(data$X2<0)]))
data$X3 <- with(data, 1-X1-X2)
data <- data[data$X3 >= 0,]
data$VAR <- rnorm(nrow(data))
# keep the X1/X2/X3/VAR column names: count_bin() and heatmap3d() expect them
str(data)
ggtern(data,aes(X1,
X2,
X3, color = VAR))+geom_point(size = 5)+scale_color_distiller(palette = "Spectral")
heatmap3d(data,.05)
I put together a function to identify outliers. It takes a data frame and then shows plots of the data with lines indicating potential outliers. It also gives a table with the outliers marked.
But it is slow: the plots take a really long time to render.
I was curious whether you might have advice on how to speed this up.
Related: is the default (base) plotting system faster than ggplot2?
I'll start with the dependencies:
#These next four functions are not mine. They're used in GetOutliers()
ExtractDetails <- function(x, down, up){
outClass <- rep("N", length(x))
indexLo <- which(x < down)
indexHi <- which(x > up)
outClass[indexLo] <- "L"
outClass[indexHi] <- "U"
index <- union(indexLo, indexHi)
values <- x[index]
outClass <- outClass[index]
nOut <- length(index)
maxNom <- max(x[which(x <= up)])
minNom <- min(x[which(x >= down)])
outList <- list(nOut = nOut, lowLim = down,
upLim = up, minNom = minNom,
maxNom = maxNom, index = index,
values = values,
outClass = outClass)
return(outList)
}
Hampel <- function(x, t = 3){
#
mu <- median(x, na.rm = TRUE)
sig <- mad(x, na.rm = TRUE)
if (sig == 0){
message("Hampel identifer implosion: MAD scale estimate is zero")
}
up<-mu+t*sig
down<-mu-t*sig
out <- list(up = up, down = down)
return(out)
}
ThreeSigma <- function(x, t = 3){
#
mu <- mean(x, na.rm = TRUE)
sig <- sd(x, na.rm = TRUE)
if (sig == 0){
message("All non-missing x-values are identical")
}
up<-mu+t* sig
down<-mu-t * sig
out <- list(up = up, down = down)
return(out)
}
BoxplotRule <- function(x, t = 1.5){
#
xL <- quantile(x, na.rm = TRUE, probs = 0.25, names = FALSE)
xU <- quantile(x, na.rm = TRUE, probs = 0.75, names = FALSE)
Q<-xU-xL
if(Q==0){
message("Boxplot rule implosion: interquartile distance is zero")
}
up<-xU+t*Q
down<-xL-t*Q # lower fence: first quartile minus t*IQR
out <- list(up = up, down = down)
return(out)
}
FindOutliers <- function(x, t3 = 3, tH = 3, tb = 1.5){
threeLims <- ThreeSigma(x, t = t3)
HampLims <- Hampel(x, t = tH)
boxLims <- BoxplotRule(x, t = tb)
n <- length(x)
nMiss <- length(which(is.na(x)))
threeList <- ExtractDetails(x, threeLims$down, threeLims$up)
HampList <- ExtractDetails(x, HampLims$down, HampLims$up)
boxList <- ExtractDetails(x, boxLims$down, boxLims$up)
sumFrame <- data.frame(method = "ThreeSigma", n = n,
nMiss = nMiss, nOut = threeList$nOut,
lowLim = threeList$lowLim,
upLim = threeList$upLim,
minNom = threeList$minNom,
maxNom = threeList$maxNom)
upFrame <- data.frame(method = "Hampel", n = n,
nMiss = nMiss, nOut = HampList$nOut,
lowLim = HampList$lowLim,
upLim = HampList$upLim,
minNom = HampList$minNom,
maxNom = HampList$maxNom)
sumFrame <- rbind.data.frame(sumFrame, upFrame)
upFrame <- data.frame(method = "BoxplotRule", n = n,
nMiss = nMiss, nOut = boxList$nOut,
lowLim = boxList$lowLim,
upLim = boxList$upLim,
minNom = boxList$minNom,
maxNom = boxList$maxNom)
sumFrame <- rbind.data.frame(sumFrame, upFrame)
threeFrame <- data.frame(index = threeList$index,
values = threeList$values,
type = threeList$outClass)
HampFrame <- data.frame(index = HampList$index,
values = HampList$values,
type = HampList$outClass)
boxFrame <- data.frame(index = boxList$index,
values = boxList$values,
type = boxList$outClass)
outList <- list(summary = sumFrame, threeSigma = threeFrame,
Hampel = HampFrame, boxplotRule = boxFrame)
return(outList)
}
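As a quick sanity check of the return structure (a made-up vector with one injected outlier):
# summary has one row per rule; the per-rule frames list index, value, and type (L/U)
out <- FindOutliers(c(rnorm(100), 10))
out$summary
out$Hampel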
#strip non-numeric variables out of a dataframe
num_vars <- function(df){
X <- which(sapply(df, is.numeric))
num_vars <- df[names(X)]
return(num_vars)
}
This is the main function:
GetOutliers <- function(df){
library('dplyr')
library('ggplot2')
#strip out the non-numeric columns
df_out <- num_vars(df)
#initialize the data frame
df_out$Hampel <- NA
df_out$threeSigma <- NA
df_out$boxplotRule <- NA
df_out_id <- df_out
#identify outliers for each column
for (i in 1:length(names(num_vars(df)))){
#find the outliers
Outs <- FindOutliers(df_out[[i]])
OutsSum <- Outs$summary
#re-enter the outlier status
df_out$Hampel <- NA
df_out$threeSigma <- NA
df_out$boxplotRule <- NA
# mark outlier rows; skip any method that found none
if (nrow(Outs$Hampel) > 0) df_out$Hampel[Outs$Hampel$index] <- TRUE
if (nrow(Outs$threeSigma) > 0) df_out$threeSigma[Outs$threeSigma$index] <- TRUE
if (nrow(Outs$boxplotRule) > 0) df_out$boxplotRule[Outs$boxplotRule$index] <- TRUE
#visualize the outliers and print outlier information
Temp <- df_out
A <- colnames(Temp)[i]
AA <- paste(A,"Index")
colnames(Temp)[i] <- 'curr_column'
#table with outlier status
X <- arrange(subset(Temp,Hampel == TRUE | boxplotRule == TRUE | threeSigma == TRUE), desc(curr_column))
#scatterplot with labels
Y <- ggplot(Temp,aes(seq_along(curr_column),curr_column)) + geom_point() +
geom_hline(yintercept=OutsSum$lowLim[1],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$lowLim[2],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$lowLim[3],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$upLim[1],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$upLim[2],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$upLim[3],linetype = 'dashed') +
geom_text(aes(40,OutsSum$lowLim[1],label="ThreeSigma Lower",vjust=-1)) +
geom_text(aes(40,OutsSum$lowLim[2],label="Hampel Lower",vjust=-1)) +
geom_text(aes(40,OutsSum$lowLim[3],label="Boxplot Lower",vjust=-1)) +
geom_text(aes(40,OutsSum$upLim[1],label="ThreeSigma Upper",vjust=-1)) +
geom_text(aes(40,OutsSum$upLim[2],label="Hampel Upper",vjust=-1)) +
geom_text(aes(40,OutsSum$upLim[3],label="Boxplot Upper",vjust=-1)) +
xlab(AA) + ylab(A)
#scatterplot without labels
Z <- ggplot(Temp,aes(seq_along(curr_column),curr_column)) + geom_point() +
geom_hline(yintercept=OutsSum$lowLim[1],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$lowLim[2],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$lowLim[3],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$upLim[1],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$upLim[2],linetype = 'dashed') +
geom_hline(yintercept=OutsSum$upLim[3],linetype = 'dashed') +
xlab(AA) + ylab(A)
U <- ggplot(Temp,aes(curr_column)) + geom_density() + xlab(A)
print(A)
print(X)
print(OutsSum)
print(Z)
print(Y)
print(U)
#mark the extreme outliers, the rest are reasonable outliers
A <- colnames(df_out_id[i])
Q <- as.numeric(readline(prompt="Enter the index for final Extreme value on the upper limit (if none, enter 0): "))
W <- as.numeric(readline(prompt="Enter the index for first Extreme value on the lower limit (if none, enter 0): "))
col <- df_out_id[i]
df_out_id[i] <- sapply(col[[1]], function(x){
if(Q>1 & x %in% X$curr_column[1:Q]) return('Extreme')
if(W>1 & x %in% X$curr_column[W:length(X$curr_column)]) return('Extreme')
else if (x %in% X$curr_column[(Q+1):length(X$curr_column)]) return('Reasonable')
else return('Non-Outlier')
})
}
#return a dataframe with outlier status, excluding the outlier ID columns
#print a summary of the outlier labels (summary() alone is not auto-printed inside a function)
print(summary(df_out_id))
return(df_out_id[1:(length(names(df_out_id))-3)])
}
Example
library('ISLR')
data(Carseats)
GetOutliers(Carseats)
It will show you the outliers for each numeric variable.
It will plot the variable's density and then a scatterplot with identifier lines.
It will also accept input so you can mark some outliers as reasonable and others as extreme.
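One plausible cause of the slowness, offered as an untested suggestion: each geom_text(aes(...)) layer in the labelled scatterplot draws its label once per row of Temp, so every label becomes hundreds of overlapping text grobs. annotate() draws each label exactly once; a sketch of the labelled plot rewritten that way:
# sketch: one grob per label/line instead of nrow(Temp) overlapping copies
Y <- ggplot(Temp, aes(seq_along(curr_column), curr_column)) +
geom_point() +
geom_hline(yintercept = c(OutsSum$lowLim, OutsSum$upLim), linetype = 'dashed') +
annotate("text", x = 40, y = OutsSum$lowLim, vjust = -1,
label = c("ThreeSigma Lower", "Hampel Lower", "Boxplot Lower")) +
annotate("text", x = 40, y = OutsSum$upLim, vjust = -1,
label = c("ThreeSigma Upper", "Hampel Upper", "Boxplot Upper")) +
xlab(AA) + ylab(A)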
I tried to rewrite the plot_densities function below in order to use ggplot2.
plot_densities <- function(density) {
neg_density <- density[[1]]
pos_density <- density[[2]]
plot(
pos_density,
ylim = range(c(neg_density$y, pos_density$y)),
main = "Coverage plot of Sample 5",
xlab = "lenght 21",
col = 'blue',
type = 'h'
)
lines(neg_density, type = 'h', col = 'red')
}
Unfortunately, the new function below caused Error in density.default(x = neg) : object 'neg' not found
plot_densities2 <- function(density) {
neg_density <- density[[1]]
pos_density <- density[[2]]
densities = append(neg_density, pos_density)
ggplot(as.data.frame(densities), aes(x=x, y=y)) +
theme_bw() +
geom_density(alpha=0.5)
}
The full code can be found below and the data can be downloaded from here
#apt update && apt install zlib1g-dev
#install if necessary
source("http://bioconductor.org/biocLite.R")
biocLite("Rsamtools")
#load library
library(Rsamtools)
extracting_pos_neg_reads <- function(bam_fn) {
#read in entire BAM file
bam <- scanBam(bam_fn)
#names of the BAM fields
names(bam[[1]])
# [1] "qname" "flag" "rname" "strand" "pos" "qwidth" "mapq" "cigar"
# [9] "mrnm" "mpos" "isize" "seq" "qual"
#distribution of BAM flags
table(bam[[1]]$flag)
# 0 4 16
#1472261 775200 1652949
#function for collapsing the list of lists into a single list
#as per the Rsamtools vignette
.unlist <- function (x) {
## do.call(c, ...) coerces factor to integer, which is undesired
x1 <- x[[1L]]
if (is.factor(x1)) {
structure(unlist(x), class = "factor", levels = levels(x1))
} else {
do.call(c, x)
}
}
#store names of BAM fields
bam_field <- names(bam[[1]])
#go through each BAM field and unlist
list <- lapply(bam_field, function(y)
.unlist(lapply(bam, "[[", y)))
#store as data frame
bam_df <- do.call("DataFrame", list)
names(bam_df) <- bam_field
dim(bam_df)
#[1] 3900410 13
#---------
#use chr22 as an example
#how many entries on the negative strand of chr22?
###table(bam_df$rname == 'chr22' & bam_df$flag == 16)
# FALSE TRUE
#3875997 24413
#function for checking negative strand
#(SAM flag bit 0x10 -- the 5th bit -- marks a read mapped to the reverse strand)
check_neg <- function(x) {
if (intToBits(x)[5] == 1) {
return(T)
} else {
return(F)
}
}
#test neg function with subset of chr22
test <- subset(bam_df)#, rname == 'chr22')
dim(test)
#[1] 56426 13
table(apply(as.data.frame(test$flag), 1, check_neg))
#number same as above
#FALSE TRUE
#32013 24413
#function for checking positive strand
#(unmapped reads carry SAM flag bit 0x4 -- the 3rd bit -- and are excluded)
check_pos <- function(x) {
if (intToBits(x)[3] == 1) {
return(F)
} else if (intToBits(x)[5] != 1) {
return(T)
} else {
return(F)
}
}
#check pos function
table(apply(as.data.frame(test$flag), 1, check_pos))
#looks OK
#FALSE TRUE
#24413 32013
#store the mapped positions on the plus and minus strands
neg <- bam_df[apply(as.data.frame(bam_df$flag), 1, check_neg),
'pos']
length(neg)
#[1] 24413
pos <- bam_df[apply(as.data.frame(bam_df$flag), 1, check_pos),
'pos']
length(pos)
#[1] 32013
#calculate the densities
neg_density <- density(neg)
pos_density <- density(pos)
#display the negative strand with negative values
neg_density$y <- neg_density$y * -1
return (list(neg_density, pos_density))
}
plot_densities <- function(density) {
neg_density <- density[[1]]
pos_density <- density[[2]]
plot(
pos_density,
ylim = range(c(neg_density$y, pos_density$y)),
main = "Coverage plot of Sample 5",
xlab = "lenght 21",
col = 'blue',
type = 'h'
)
lines(neg_density, type = 'h', col = 'red')
}
plot_densities2 <- function(density) {
neg_density <- density[[1]]
pos_density <- density[[2]]
densities = append(neg_density, pos_density)
densities
ggplot(as.data.frame(densities), aes(x=x, y=y)) +
theme_bw() +
geom_density(alpha=0.5)
}
filenames <- c("~/sample5-21.sam-uniq.sorted.bam", "~/sample5-24.sam-uniq.sorted.bam")
for ( i in filenames){
print(i)
density <- extracting_pos_neg_reads(i)
plot_densities2(density)
}
Density objects do not play well with append and as.data.frame: they contain elements that cause problems and are unnecessary here. What we can do instead is pick out only the x and y elements to construct the relevant data frame:
plot_densities2 <- function(density) {
densities <- cbind(rbind(data.frame(density[[1]][1:2]), data.frame(density[[2]][1:2])),
id = rep(c("neg", "pos"), each = length(density[[1]]$x)))
print(ggplot(data = densities, aes(x = x, y = y, fill = id)) +
theme_bw() + geom_area(alpha = 0.5))
}
I am analysing a time-series signal. I set a threshold to separate the signal from the baseline noise. In order to identify the properties of each signal sequence (duration, amplitude, maximum signal, ...), I built a function that aggregates all contiguous signal points into distinct "peaks". Although this function does what I want, I was wondering if anyone can help me make it more efficient (e.g., by vectorization), because I aim to run it on a data.table of more than 1M rows. Here is sample data with the function:
# load libraries
library(zoo)
library(ggplot2)
# Generate dummy data
x <- sin(seq(from = 0, to = 20, length.out = 200)) + rnorm(200, 0, 0.1)
x <- zoo(x)
plot(x)
# Label each point as signal (== 1) or noise (== 0)
y <- ifelse(x > 0.5, 1, 0)
# Function to label each peak
peak_labeler <- function(x) {
tmp <- NULL
for (i in seq_along(x)) {
if (x[i] == 0) { tmp[i] <- 0 } # If baseline, mark as 0
if (x[i] == 1) {
# If x[n] belongs to a peak
if (i == 1) {tmp[i] <- 1} # Label as 1 at t0
else{
if (!exists("Peak")) {Peak <- 0}
if (x[i - 1] == 0) {
# if previous point is no peak, add as peak
Peak <- Peak + 1
tmp[i] <- Peak
}
if (x[i - 1] == 1) {
tmp[i] <- Peak
}
}
}
}
return(tmp)
rm(tmp, Peak, i) # Garbage collection
}
# Label peaks
dummy <- data.frame(t = 1:200, x,y,tmp = peak_labeler(y))
# Show data
ggplot(dummy, aes(x = t, y = x)) +
geom_point(aes(col = as.factor(tmp), group = 1))
Here's an approach using dplyr.
The test in the cross_threshold line works by evaluating whether y is on a different side of 0.5 than the prior y. If so, the sign of the two terms y - threshold and lag(y) - threshold will be different, leading to a TRUE, which is multiplied by 1 to become 1. If they're on the same side of 0.5, you'll get a FALSE and a 0. The default = 0 part deals with the first line, where lag(y) is undefined. Then we add up how many cumulative crosses there have been to define the tmp group.
library(dplyr)
threshold = 0.5
dummy <- data.frame(t = 1:200, x, y) %>%
mutate(cross_threshold = 1 * (sign(y - threshold) != sign(lag(y, default = 0) - threshold)),
# Line above is now optional, only needed if we want to label all crossings
up = 1 * ((y > threshold) & (lag(y, default = 0) < threshold)),
tmp = if_else(y > threshold, cumsum(up), 0))
ggplot(dummy, aes(x = t, y = x)) +
geom_point(aes(col = as.factor(tmp), group = 1)) +
geom_point(data = filter(dummy, cross_threshold == 1), shape = 21, size = 5)
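Since the original goal was per-peak properties (duration, amplitude, maximum signal), here is a short follow-up sketch: once tmp labels each peak, those summaries drop out of a grouped summarise on the columns defined above:
# per-peak properties from the labelled data (tmp == 0 is baseline)
peak_stats <- dummy %>%
filter(tmp > 0) %>%
group_by(tmp) %>%
summarise(duration = n(),
amplitude = max(x) - min(x),
max_signal = max(x))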
I am using the ggmap route function to calculate and visualize hundreds of routes using D.C. Capital Bikeshare data. I am able to do this with one minor problem: the route path doesn't follow roads, particularly curved roads (see screenshot below). Is there a way to tweak my code to allow for more detailed paths?
library(tidyverse)
library(ggmap)
# Example dataset
feb_14 <- read.csv('https://raw.githubusercontent.com/smitty1788/Personal-Website/master/dl/CaBi_Feb_2017.csv', stringsAsFactors = FALSE)
# Subset first 300 rows, keep start and end Lat/Long strings
start<-c(feb_14[1:300, 14])
dest<-c(feb_14[1:300, 15])
# df of individual routes
routes <- tibble(
start,
dest)
# Function to calculate route
calculationroute <- function(startingpoint, stoppoint) {
route(from = startingpoint,
to = stoppoint,
mode = 'bicycling',
structure = "route")}
# Calculate route path for all individual trips
calculatedroutes <- mapply(calculationroute,
startingpoint = routes$start,
stoppoint = routes$dest,
SIMPLIFY = FALSE)
# Unlist and merge in single dataframe
do.call(rbind.data.frame, lapply(names(calculatedroutes), function(x) {
cbind.data.frame(route=x, calculatedroutes[[x]], stringsAsFactors=FALSE)
})) -> long_routes
# create map with routes
basicmap <- get_map(location = 'washingtondc',
zoom = 13,
maptype = "toner-background",
source = "google",
color = "bw")
basicmap <- ggmap(basicmap)
basicmap + geom_path(data=long_routes,
aes(x=lon, y=lat, group=route), color = "red",
size=1, alpha = .4, lineend = "round")
The answer was to place the decodeLine function into the do.call that creates the long_routes data frame:
# Decode a Google Maps encoded polyline string into a data frame of lat/lon points
decodeLine <- function(encoded){
require(bitops)
vlen <- nchar(encoded)
vindex <- 0
varray <- NULL
vlat <- 0
vlng <- 0
while(vindex < vlen){
vb <- NULL
vshift <- 0
vresult <- 0
repeat{
if(vindex + 1 <= vlen){
vindex <- vindex + 1
vb <- as.integer(charToRaw(substr(encoded, vindex, vindex))) - 63
}
vresult <- bitOr(vresult, bitShiftL(bitAnd(vb, 31), vshift))
vshift <- vshift + 5
if(vb < 32) break
}
dlat <- ifelse(
bitAnd(vresult, 1)
, -(bitShiftR(vresult, 1)+1)
, bitShiftR(vresult, 1)
)
vlat <- vlat + dlat
vshift <- 0
vresult <- 0
repeat{
if(vindex + 1 <= vlen) {
vindex <- vindex+1
vb <- as.integer(charToRaw(substr(encoded, vindex, vindex))) - 63
}
vresult <- bitOr(vresult, bitShiftL(bitAnd(vb, 31), vshift))
vshift <- vshift + 5
if(vb < 32) break
}
dlng <- ifelse(
bitAnd(vresult, 1)
, -(bitShiftR(vresult, 1)+1)
, bitShiftR(vresult, 1)
)
vlng <- vlng + dlng
varray <- rbind(varray, c(vlat * 1e-5, vlng * 1e-5))
}
coords <- data.frame(varray)
names(coords) <- c("lat", "lon")
coords
}
calculatedroutes <- mapply(calculationroute,
startingpoint = routes$start,
stoppoint = routes$dest,
SIMPLIFY = FALSE)
do.call(rbind.data.frame, lapply(names(calculatedroutes), function(x) {
cbind.data.frame(route = x, decodeLine(calculatedroutes[[x]]$routes[[1]]$overview_polyline$points), stringsAsFactors=FALSE)
})) -> long_routes
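With the decoded, point-dense paths in long_routes, the earlier geom_path call can be reused unchanged:
basicmap + geom_path(data = long_routes,
aes(x = lon, y = lat, group = route), color = "red",
size = 1, alpha = .4, lineend = "round")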
I wish to plot the mean (or another function) of reaction time as a function of the location of the target in the x-y plane.
As test data:
library(ggplot2)
xs <- runif(100,-1,1)
ys <- runif(100,-1,1)
rts <- rnorm(100)
testDF <- data.frame("x"=xs,"y"=ys,"rt"=rts)
I know I can do this:
p <- ggplot(data = testDF,aes(x=x,y=y))+geom_bin2d(bins=10)
What I would like to be able to do, is the same thing but plot a function of the data in each bin rather than counts. Can I do this?
Or do I need to generate the conditional means first in R (e.g. drt <- tapply(testDF$rt,list(cut(testDF$x,10),cut(testDF$y,10)),mean)) and then plot that?
Thank you.
Update: With the release of ggplot2 0.9.0, much of this functionality is covered by the new additions stat_summary2d and stat_summary_bin.
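For the test data in this question, that reduces to a one-liner; a minimal sketch (written with the current spelling stat_summary_2d and fun = mean made explicit):
ggplot(testDF, aes(x = x, y = y, z = rt)) +
stat_summary_2d(bins = 10, fun = mean)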
Here is a gist for this answer: https://gist.github.com/1341218
Here is a slight modification of stat_bin2d so as to accept an arbitrary function:
StatAggr2d <- proto(Stat, {
objname <- "aggr2d"
default_aes <- function(.) aes(fill = ..value..)
required_aes <- c("x", "y", "z")
default_geom <- function(.) GeomRect
calculate <- function(., data, scales, binwidth = NULL, bins = 30, breaks = NULL, origin = NULL, drop = TRUE, fun = mean, ...) {
range <- list(
x = scales$x$output_set(),
y = scales$y$output_set()
)
# Determine binwidth, if omitted
if (is.null(binwidth)) {
binwidth <- c(NA, NA)
if (is.integer(data$x)) {
binwidth[1] <- 1
} else {
binwidth[1] <- diff(range$x) / bins
}
if (is.integer(data$y)) {
binwidth[2] <- 1
} else {
binwidth[2] <- diff(range$y) / bins
}
}
stopifnot(is.numeric(binwidth))
stopifnot(length(binwidth) == 2)
# Determine breaks, if omitted
if (is.null(breaks)) {
if (is.null(origin)) {
breaks <- list(
fullseq(range$x, binwidth[1]),
fullseq(range$y, binwidth[2])
)
} else {
breaks <- list(
seq(origin[1], max(range$x) + binwidth[1], binwidth[1]),
seq(origin[2], max(range$y) + binwidth[2], binwidth[2])
)
}
}
stopifnot(is.list(breaks))
stopifnot(length(breaks) == 2)
stopifnot(all(sapply(breaks, is.numeric)))
names(breaks) <- c("x", "y")
xbin <- cut(data$x, sort(breaks$x), include.lowest=TRUE)
ybin <- cut(data$y, sort(breaks$y), include.lowest=TRUE)
if (is.null(data$weight)) data$weight <- 1
ans <- ddply(data.frame(data, xbin, ybin), .(xbin, ybin), function(d) data.frame(value = fun(d$z)))
within(ans,{
xint <- as.numeric(xbin)
xmin <- breaks$x[xint]
xmax <- breaks$x[xint + 1]
yint <- as.numeric(ybin)
ymin <- breaks$y[yint]
ymax <- breaks$y[yint + 1]
})
}
})
stat_aggr2d <- StatAggr2d$build_accessor()
and usage:
ggplot(data = testDF,aes(x=x,y=y, z=rts))+stat_aggr2d(bins=3)
ggplot(data = testDF,aes(x=x,y=y, z=rts))+
stat_aggr2d(bins=3, fun = function(x) sum(x^2))
As well, here is a slight modification of stat_binhex:
StatAggrhex <- proto(Stat, {
objname <- "aggrhex"
default_aes <- function(.) aes(fill = ..value..)
required_aes <- c("x", "y", "z")
default_geom <- function(.) GeomHex
calculate <- function(., data, scales, binwidth = NULL, bins = 30, na.rm = FALSE, fun = mean, ...) {
try_require("hexbin")
data <- remove_missing(data, na.rm, c("x", "y"), name="stat_hexbin")
if (is.null(binwidth)) {
binwidth <- c(
diff(scales$x$input_set()) / bins,
diff(scales$y$input_set() ) / bins
)
}
try_require("hexbin")
x <- data$x
y <- data$y
# Convert binwidths into bounds + nbins
xbnds <- c(
round_any(min(x), binwidth[1], floor) - 1e-6,
round_any(max(x), binwidth[1], ceiling) + 1e-6
)
xbins <- diff(xbnds) / binwidth[1]
ybnds <- c(
round_any(min(y), binwidth[2], floor) - 1e-6,
round_any(max(y), binwidth[2], ceiling) + 1e-6
)
ybins <- diff(ybnds) / binwidth[2]
# Call hexbin
hb <- hexbin(
x, xbnds = xbnds, xbins = xbins,
y, ybnds = ybnds, shape = ybins / xbins,
IDs = TRUE
)
value <- tapply(data$z, hb@cID, fun)
# Convert to data frame
data.frame(hcell2xy(hb), value)
}
})
stat_aggrhex <- StatAggrhex$build_accessor()
and usage:
ggplot(data = testDF,aes(x=x,y=y, z=rts))+stat_aggrhex(bins=3)
ggplot(data = testDF,aes(x=x,y=y, z=rts))+
stat_aggrhex(bins=3, fun = function(x) sum(x^2))
This turned out to be harder than I expected.
You can almost trick ggplot into doing this, by providing a weights aesthetic, but that only gives you the sum of the weights in the bin, not the mean (and you have to specify drop=FALSE to retain negative bin values). You can also retrieve either counts or density within a bin, but neither of those really solves the problem.
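For concreteness, a minimal sketch of that weights trick: the weight aesthetic makes stat_bin2d report the per-bin sum of rt, which is exactly why it falls short of a mean:
ggplot(testDF, aes(x = x, y = y)) +
geom_bin2d(aes(weight = rt), bins = 8, drop = FALSE)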
Here's what I ended up with:
## breaks vector (slightly coarser than the 10x10 spec above;
## even 64 bins is a lot for binning only 100 points)
bvec <- seq(-1,1,by=0.25)
## helper function (melt() comes from the reshape2 package)
library(reshape2)
tmpf <- function(x,y,z,FUN=mean,breaks) {
midfun <- function(x) (head(x,-1)+tail(x,-1))/2
mids <- list(x=midfun(breaks$x),y=midfun(breaks$y))
tt <- tapply(z,list(cut(x,breaks$x),cut(y,breaks$y)),FUN)
mt <- melt(tt) ## reshape2 names the melted matrix dimensions Var1/Var2
## factor order gets scrambled (argh), reset it
mt$Var1 <- factor(mt$Var1,levels=rownames(tt))
mt$Var2 <- factor(mt$Var2,levels=colnames(tt))
transform(mt,
x=mids$x[mt$Var1],
y=mids$y[mt$Var2])
}
ggplot(data=with(testDF,tmpf(x,y,rt,breaks=list(x=bvec,y=bvec))),
aes(x=x,y=y,fill=value))+
geom_tile()+
scale_x_continuous(expand=c(0,0))+ ## expand to fill plot region
scale_y_continuous(expand=c(0,0))
This assumes equal bin widths, etc., could be extended ... it really is too bad that (as far as I can tell) stat_bin2d doesn't accept a user-specified function.