Vector field visualisation in R

I have a big text file with a lot of rows. Every row corresponds to one vector.
Here is an example row:
x y dx dy
99.421875 52.078125 0.653356799108 0.782479314511
The first two columns are the coordinates of the beginning of the vector, and the last two columns are the coordinate increments (the end minus the start).
I need to make a picture of this vector field (all the vectors in one picture).
How could I do this?
Thank you

If there is a lot of data (the question says "big file"),
plotting the individual vectors may not give a very readable plot.
Here is another approach: the vector field describes a way of deforming something drawn on the plane;
apply it to a white noise image.
vector_field <- function(
  f,                       # function describing the vector field
  xmin = 0, xmax = 1, ymin = 0, ymax = 1,
  width = 600, height = 600,
  iterations = 50,
  epsilon = .01,
  trace = TRUE
) {
  z <- matrix(runif(width * height), nrow = height)
  i_to_x <- function(i) xmin + i / width * (xmax - xmin)
  j_to_y <- function(j) ymin + j / height * (ymax - ymin)
  x_to_i <- function(x) pmin(width, pmax(1, floor((x - xmin) / (xmax - xmin) * width)))
  y_to_j <- function(y) pmin(height, pmax(1, floor((y - ymin) / (ymax - ymin) * height)))
  i <- col(z)
  j <- row(z)
  x <- i_to_x(i)
  y <- j_to_y(j)
  res <- z
  for (k in 1:iterations) {
    v <- matrix(f(x, y), ncol = 2)
    x <- x + epsilon * v[, 1]
    y <- y + epsilon * v[, 2]
    i <- x_to_i(x)
    j <- y_to_j(y)
    res <- res + z[cbind(i, j)]
    if (trace) {
      cat(k, "/", iterations, "\n", sep = "")
      dev.hold()
      image(res)
      dev.flush()
    }
  }
  if (trace) {
    dev.hold()
    image(res > quantile(res, .6), col = 0:1)
    dev.flush()
  }
  res
}
# Sample data
van_der_Pol <- function(x, y, mu = 1) c(y, mu * (1 - x^2) * y - x)
res <- vector_field(
  van_der_Pol,
  xmin = -3, xmax = 3, ymin = -3, ymax = 3,
  width = 800, height = 800,
  iterations = 50,
  epsilon = .01
)
image(-res)
You may want to apply some image processing to the result to make it more readable.
image(res > quantile(res,.6), col=0:1)
In your case, the vector field is not described by a function:
you can use the value of the nearest neighbour or some 2-dimensional interpolation
(e.g., from the akima package).
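For example, here is a minimal nearest-neighbour sketch that turns the data from the question into such a function; the file name vectors.txt and the column names are assumptions, and the brute-force lookup may be slow for very large files.
vec <- read.table("vectors.txt", col.names = c("x", "y", "dx", "dy"))  # assumed file name
nearest_field <- function(x, y) {
  # index of the closest data point for every query point
  idx <- vapply(seq_along(x), function(k) {
    which.min((vec$x - x[k])^2 + (vec$y - y[k])^2)
  }, integer(1))
  c(vec$dx[idx], vec$dy[idx])  # same c(dx, dy) layout that vector_field() expects of f
}
res <- vector_field(nearest_field,
                    xmin = min(vec$x), xmax = max(vec$x),
                    ymin = min(vec$y), ymax = max(vec$y),
                    width = 400, height = 400)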

With ggplot2, you can do something like this:
library(ggplot2)
library(grid)
df <- data.frame(x = runif(10), y = runif(10), dx = rnorm(10), dy = rnorm(10))
ggplot(data = df, aes(x = x, y = y)) +
  geom_segment(aes(xend = x + dx, yend = y + dy), arrow = arrow(length = unit(0.3, "cm")))
This is taken almost directly from the geom_segment help page.

OK, here's a base solution:
DF <- data.frame(x = rnorm(10), y = rnorm(10), dx = runif(10), dy = runif(10))
plot(NULL, type = "n", xlim = c(-3, 3), ylim = c(-3, 3))
arrows(DF[, 1], DF[, 2], DF[, 1] + DF[, 3], DF[, 2] + DF[, 4])
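To apply the same idea to the data in the question, read the file first; this is just a sketch, and the file name vectors.txt is an assumption.
vec <- read.table("vectors.txt", col.names = c("x", "y", "dx", "dy"))  # assumed file name
plot(NULL, xlim = range(vec$x), ylim = range(vec$y), xlab = "x", ylab = "y")
arrows(vec$x, vec$y, vec$x + vec$dx, vec$y + vec$dy, length = 0.05)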

Here is an example from the help pages of the pracma package.
library(pracma)
f <- function(x, y) x^2 - y^2
xx <- c(-1, 1); yy <- c(-1, 1)
vectorfield(f, xx, yy, scale = 0.1)
for (xs in seq(-1, 1, by = 0.25)) {
  sol <- rk4(f, -1, 1, xs, 100)
  lines(sol$x, sol$y, col = "darkgreen")
}
You can also use quiver.
library(pracma)
xyRange <- seq(-pi, pi, 0.2)
temp <- meshgrid(xyRange, xyRange)
u <- sin(temp$Y)
v <- cos(temp$X)
plot(range(xyRange), range(xyRange), type = "n",
     xlab = expression(frac(d * Phi, dx)), ylab = expression(d * Phi / dy))
quiver(temp$X, temp$Y, u, v, scale = 0.5, length = 0.05, angle = 1)
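If the file from the question is really large, drawing every arrow may give an unreadable plot; here is a hedged sketch that thins the rows before calling quiver (the file name and the sample size of 2000 are assumptions).
library(pracma)
vec <- read.table("vectors.txt", col.names = c("x", "y", "dx", "dy"))  # assumed file name
keep <- sample(nrow(vec), min(2000, nrow(vec)))  # draw at most 2000 arrows
plot(range(vec$x), range(vec$y), type = "n", xlab = "x", ylab = "y")
quiver(vec$x[keep], vec$y[keep], vec$dx[keep], vec$dy[keep], scale = 1, length = 0.05)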


How to create covariance matrix in R?

I'm trying to build a covariance matrix from scratch (like the cov() function does). My task is to not use any package. Hence I created these functions:
meanf <- function(x) {
  sum(x) / length(x)
}
sampleCov <- function(x, y) {
  stopifnot(identical(length(x), length(y)))
  sum((x - meanf(x)) * (y - meanf(y))) / (length(x) - 1)
}
> sampleCov(winequality_red$quality, winequality_red$alcohol)
[1] 0.409789
Unfortunately, I'm stuck here. All the loops I tried missed the point. Of course it's possible to just call the sampleCov function manually for every possible combination of columns, but that's not what I'm after.
If I understand you correctly, I believe you want to recreate a covariance matrix like the one returned by the cov function.
The OP's functions:
meanf <- function(x) {
  sum(x) / length(x)
}
sampleCov <- function(x, y) {
  stopifnot(identical(length(x), length(y)))
  sum((x - meanf(x)) * (y - meanf(y))) / (length(x) - 1)
}
You can try it this way; I have used the mtcars data here.
Covariance calculation:
vars <- names(mtcars)
egrid <- expand.grid(vars, vars)
egrid <- data.frame(sapply(egrid, as.character), stringsAsFactors = FALSE)
egrid <- egrid[order(egrid$Var1, egrid$Var2), ]
mat <- vector("list", nrow(egrid))
for (i in 1:nrow(egrid)) {
  mat[[i]] <- sampleCov(mtcars[, egrid[i, "Var1"]], mtcars[, egrid[i, "Var2"]])
}
finaldat <- cbind(egrid, cov = do.call('rbind', mat))
finaldat_list <- split(finaldat, finaldat$Var1)
mat_form <- do.call('cbind', finaldat_list)
cov_values <- mat_form[, grepl("\\.cov", names(mat_form))]
col_values <- mat_form[, paste0(egrid$Var1[1], ".Var2")]
final_matrix_cov <- cbind(col_values, cov_values)
Sample Output:
> final_matrix_cov
col_values am.cov carb.cov cyl.cov disp.cov
9 mpg 1.80393145 -5.36310484 -9.1723790 -633.09721
20 cyl -0.46572581 1.52016129 3.1895161 199.66028
31 disp -36.56401210 79.06875000 199.6602823 15360.79983
42 hp -8.32056452 83.03629032 101.9314516 6721.15867
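For comparison, here is a more compact sketch that reuses the OP's sampleCov and loops over all column pairs with nested sapply calls; the result should agree with cov(mtcars).
cov_matrix <- function(df) {
  vars <- names(df)
  # nested sapply builds the full variable-by-variable matrix
  sapply(vars, function(a) sapply(vars, function(b) sampleCov(df[[a]], df[[b]])))
}
round(cov_matrix(mtcars), 4)  # same values as round(cov(mtcars), 4)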
You need the matrix multiplication %*%.
sampleCov <- function(x, y) {
  stopifnot(identical(length(x), length(y)))
  sum((x - mean(x)) %*% (y - mean(y))) / (length(x) - 1)
}
> sampleCov(rnorm(10000),rnorm(10000))
[1] 0.01808466
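As a quick sanity check, this version can be compared against the built-in cov on any two numeric vectors of equal length (an illustrative sketch):
set.seed(1)
a <- rnorm(100); b <- rnorm(100)
all.equal(sampleCov(a, b), cov(a, b))  # should be TRUE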
This is probably a little more than you need, but it should answer your question, and I think it is a nice illustration of the practical application of covariances, correlations, etc.
# load the required packages and the data
library(data.table)
library(ggplot2)
library(scales)  # for the percent() label formatter
link <- "https://raw.githubusercontent.com/DavZim/Efficient_Frontier/master/data/mult_assets.csv"
df <- data.table(read.csv(link))
# calculate the necessary values:
# I) expected returns for the two assets
er_x <- mean(df$x)
er_y <- mean(df$y)
# II) risk (standard deviation) as a risk measure
sd_x <- sd(df$x)
sd_y <- sd(df$y)
# III) covariance
cov_xy <- cov(df$x, df$y)
# create 1000 portfolio weights (omegas)
x_weights <- seq(from = 0, to = 1, length.out = 1000)
# create a data.table that contains the weights for the two assets
two_assets <- data.table(wx = x_weights,
                         wy = 1 - x_weights)
# calculate the expected returns and standard deviations for the 1000 possible portfolios
two_assets[, ':=' (er_p = wx * er_x + wy * er_y,
                   sd_p = sqrt(wx^2 * sd_x^2 +
                                 wy^2 * sd_y^2 +
                                 2 * wx * (1 - wx) * cov_xy))]
two_assets
# lastly plot the values
ggplot() +
  geom_point(data = two_assets, aes(x = sd_p, y = er_p, color = wx)) +
  geom_point(data = data.table(sd = c(sd_x, sd_y), mean = c(er_x, er_y)),
             aes(x = sd, y = mean), color = "red", size = 3, shape = 18) +
  # Miscellaneous Formatting
  theme_bw() + ggtitle("Possible Portfolios with Two Risky Assets") +
  xlab("Volatility") + ylab("Expected Returns") +
  scale_y_continuous(label = percent, limits = c(0, max(two_assets$er_p) * 1.2)) +
  scale_x_continuous(label = percent, limits = c(0, max(two_assets$sd_p) * 1.2)) +
  scale_color_continuous(name = expression(omega[x]), labels = percent)
See the link below for all details.
https://datashenanigan.wordpress.com/2016/05/24/a-gentle-introduction-to-finance-using-r-efficient-frontier-and-capm-part-1/

xyplot time series with positive values in green, negative in red, in R

Is there a neat way to color negative values in red and others in green for a (simplified) time series plot below, using lattice::xyplot?
library(lattice)
library(zoo)
set.seed(0)
xyplot(zoo(cumsum(rnorm(100))), grid = TRUE)
Lattice is based on grid, so you can use grid's clipping functionality:
library(lattice)
library(grid)
library(zoo)
set.seed(0)
x <- zoo(cumsum(rnorm(100)))
xyplot(x, grid = TRUE, panel = function(x, y, ...) {
  panel.xyplot(x, y, col = "red", ...)
  grid.clip(y = unit(0, "native"), just = c("bottom"))
  panel.xyplot(x, y, col = "green", ...)
})
When using type="l" you only have one "line" and it's all one color, so you might instead choose to color points:
set.seed(0); require(zoo); require(lattice)
vals <- zoo(cumsum(rnorm(100)))
png()
xyplot(vals, type = c("l", "p"), col = c("red", "green")[1 + (vals > 0)], grid = TRUE)
dev.off()
I found a solution by Sundar Dorai-Rag, a fellow now at Google, to a similar request (coloring the enclosed areas above and below 0), in which his approach to getting the crossing values for the x's was to invert the results of approx; see http://r.789695.n4.nabble.com/shading-under-the-lines-in-a-lattice-xyplot-td793875.html. Instead of coloring the enclosed areas, I gave the borders of the polygons the desired colors and left the interior "transparent":
lpolygon <- function(x, y = NULL, border = NULL, col = NULL, ...) {
  require(grid, TRUE)
  xy <- xy.coords(x, y)
  x <- xy$x
  y <- xy$y
  gp <- list(...)
  if (!is.null(border)) gp$col <- border
  if (!is.null(col)) gp$fill <- col
  gp <- do.call("gpar", gp)
  grid.polygon(x, y, gp = gp, default.units = "native")
}
find.zero <- function(x, y) {
  n <- length(y)
  yy <- c(0, y)
  wy <- which(yy[-1] * yy[-n - 1] < 0)
  if (!length(wy)) return(NULL)
  xout <- sapply(wy, function(i) {
    n <- length(x)
    ii <- c(i - 1, i)
    approx(y[ii], x[ii], 0)$y
  })
  xout
}
trellis.par.set(theme = col.whitebg())
png()
xyplot(vals, panel = function(x, y, ...) {
  x.zero <- find.zero(x, y)
  y.zero <- y > 0
  yy <- c(y[y.zero], rep(0, length(x.zero)))
  xx <- c(x[y.zero], x.zero)
  ord <- order(xx)
  xx <- xx[ord]
  xx <- c(xx[1], xx, xx[length(xx)])
  yy <- c(0, yy[ord], 0)
  lpolygon(xx, yy, col = "transparent", border = "green")
  yy <- c(y[!y.zero], rep(0, length(x.zero)))
  xx <- c(x[!y.zero], x.zero)
  ord <- order(xx)
  xx <- xx[ord]
  xx <- c(xx[1], xx, xx[length(xx)])
  yy <- c(0, yy[ord], 0)
  lpolygon(xx, yy, col = "transparent", border = "red")
  panel.abline(h = 0)
  panel.grid(v = -1, h = -1)
})
dev.off()
I tried writing a custom panel function for this that breaks a line at a given value:
panel.breakline <- function(x, y, breakat = 0, col.line,
                            upper.col = "red", lower.col = "green", ...) {
  f <- approxfun(x, y)
  ff <- function(x) f(x) - breakat
  psign <- sign(y - breakat)
  breaks <- which(diff(psign) != 0)
  interp <- sapply(breaks, function(i) uniroot(ff, c(x[i], x[i + 1]))$root)
  starts <- c(1, breaks + 1)
  ends <- c(breaks, length(x))
  Map(function(start, end, left, right) {
    x <- x[start:end]
    y <- y[start:end]
    col <- ifelse(y[1] > breakat, upper.col, lower.col)
    panel.xyplot(c(left, x, right), c(breakat, y, breakat), col.line = col, ...)
  }, starts, ends, c(NA, interp), c(interp, NA))
}
You can run it with:
library(zoo)
library(lattice)
set.seed(0)
zz <- zoo(cumsum(rnorm(100)))
xyplot(zz, grid = TRUE, panel.groups = panel.breakline)
And you can change the break point or the colors as well
xyplot(zz, grid = TRUE, panel.groups = panel.breakline,
       breakat = 2, upper.col = "blue", lower.col = "orange")
If one were to do it without points, then I'd stick to plot (instead of lattice) and use clip, like in one of the answers here:
Plot a line chart with conditional colors depending on values
dat <- zoo(cumsum(rnorm(100)))
plot(dat, col = "red")
clip(0, length(dat), 0, max(dat))
lines(dat, col = "green")

Shorten Arrows/Lines/Segments Between Coordinates

I am drawing arrows from one set of points to another with arrows(). I'd like to shorten the arrows by a common length so that they don't overlap with the label. However, it's not obvious how one does that, given that arrows() takes coordinates as input.
For instance, here's an example.
x <- stats::runif(12); y <- stats::rnorm(12)
i <- order(x, y); x <- x[i]; y <- y[i]
plot(x,y, main = "Stack Example", type = 'n')
text(x = x, y = y, LETTERS[1:length(x)], cex = 2, col = sample(colors(), 12))
s <- seq(length(x)-1) # one shorter than data
arrows(x[s], y[s], x[s+1], y[s+1])
How do I shorten the arrows so they don't overlap with the labels?
UPDATE
These are all great answers. In an attempt to come up with something that doesn't presume that points connect in a chain, I wrote the following function, which moves x0y0 (a dataframe where column 1 is x and column 2 is y) closer to xy (same format as x0y0) by absolute distance d.
movePoints <- function(x0y0, xy, d) {
  total.dist <- apply(cbind(x0y0, xy), 1,
                      function(x) stats::dist(rbind(x[1:2], x[3:4])))
  p <- d / total.dist
  p <- 1 - p
  x0y0[, 1] <- xy[, 1] + p * (x0y0[, 1] - xy[, 1])
  x0y0[, 2] <- xy[, 2] + p * (x0y0[, 2] - xy[, 2])
  return(x0y0)
}
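An illustrative usage sketch, reusing the x, y and s objects from the example above (the shortening distance of 0.1 is an arbitrary choice):
from <- data.frame(x = x[s],     y = y[s])
to   <- data.frame(x = x[s + 1], y = y[s + 1])
from2 <- movePoints(from, to, d = 0.1)  # pull the start points towards the ends
to2   <- movePoints(to, from, d = 0.1)  # pull the end points towards the starts
arrows(from2[, 1], from2[, 2], to2[, 1], to2[, 2])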
I don't think there is a built-in solution, but if you can guarantee that your points are spaced far enough apart (otherwise drawing arrows would be difficult anyway!) then you can "shrink" the points the arrows are drawn on by the radius of an imaginary circle circumscribing each letter.
Note, however, that since the scales of the x and y axes differ, we have to be careful to normalize the x and y values before the transformation. The reduce_length parameter below is the estimated fraction of the total viewport that a typical letter occupies. You can tweak this if you want a little more space around the letters. Also be careful not to pick colors that make a letter invisible.
Finally, the remaining imperfections are because different letters have different dimensions. To really address this, we would need a map of letters to small x and y adjustments.
x <- stats::runif(12); y <- stats::rnorm(12)
i <- order(x, y); x <- x[i]; y <- y[i]
initx <- x; inity <- y
plot(x, y, main = "Stack Example", type = 'n')
text(x = x, y = y, LETTERS[1:length(x)], cex = 2, col = sample(colors()[13:100], 12))
spaced_arrows <- function(x, y, reduce_length = 0.048) {
  s <- seq(length(x) - 1) # one shorter than data
  xscale <- max(x) - min(x)
  yscale <- max(y) - min(y)
  x <- x / xscale
  y <- y / yscale
  # shrink the line around its midpoint, normalizing for differences
  # in scale of x and y
  lapply(s, function(i) {
    dist <- sqrt((x[i + 1] - x[i])^2 + (y[i + 1] - y[i])^2)
    # calculate our normalized unit vector, accounting for scale
    # differences in x and y
    tmp <- reduce_length * (x[i + 1] - x[i]) / dist
    x[i] <- x[i] + tmp
    x[i + 1] <- x[i + 1] - tmp
    tmp <- reduce_length * (y[i + 1] - y[i]) / dist
    y[i] <- y[i] + tmp
    y[i + 1] <- y[i + 1] - tmp
    newdist <- sqrt((x[i + 1] - x[i])^2 + (y[i + 1] - y[i])^2)
    if (newdist > reduce_length * 1.5) # don't show too short arrows
      # we have to rescale back to the original dimensions
      arrows(xscale * x[i], yscale * y[i], xscale * x[i + 1], yscale * y[i + 1])
  })
  TRUE
}
spaced_arrows(x, y)
I noticed that some of the arrows in @RobertKrzyzanowski's answer were reversed when the letters were close, so I reduced the factor. I also vectorized the function using the diff() function:
plot(x, y, main = "Stack Example", type = 'n')
text(x = x, y = y, LETTERS[1:length(x)], cex = 2)
gap_arrows <- function(x, fact = 0.075) {
  dist <- sqrt(diff(x)^2 + diff(y)^2)
  x0 <- x[-length(x)] + (tmp <- fact * diff(x) / dist)
  x1 <- x[-1] - tmp
  y0 <- y[-length(y)] + (tmp <- fact * diff(y) / dist)
  y1 <- y[-1] - tmp
  arrows(x0, y0, x1, y1)
}
gap_arrows(x)
I don't really think this is a finished answer, but perhaps useful? I think using a factor rather than an absolute reduction creates some shortening when the line is near horizontal that I don't understand. The G-G transition seems odd (too short) in this data:
> dput(x)
c(0.058478488586843, 0.152887222822756, 0.171698493883014, 0.197744736680761,
0.260856857057661, 0.397151953307912, 0.54208036721684, 0.546826156554744,
0.633055359823629, 0.662317642010748, 0.803418542025611, 0.83192756283097
)
> dput(y)
c(-0.256092192198247, -0.961856634130129, 0.0412329219929399,
0.235386572284857, 1.84386200523221, -0.651949901695459, -0.490557443700668,
1.44455085842335, -0.422496832339625, 0.451504053079215, -0.0713080861235987,
0.0779608495637108)

Why does the lines function close the path in R?

Objective: Given two points, find the coordinates of the arc that connects them and plot it.
Implementation: One function to find the arc's points (circleFun) and another to plot it (plottest). The colors show the direction of the path, from red to green.
circleFun <- function(x, y)
{
  center <- c((x[1] + y[1]) / 2, (x[2] + y[2]) / 2)
  diameter <- as.numeric(dist(rbind(x, y)))
  r <- diameter / 2
  tt <- seq(0, 2 * pi, length.out = 1000)
  xx <- center[1] + r * cos(tt)
  yy <- center[2] + r * sin(tt)
  res <- data.frame(x = xx, y = yy)
  if ((x[1] < y[1] & x[2] > y[2]) | (x[1] > y[1] & x[2] < y[2])) {
    res <- res[which(res$x > min(c(x[1], y[1])) & res$y > min(c(x[2], y[2]))), ]
  } else {
    res <- res[which(res$x < max(c(x[1], y[1])) & res$y > min(c(x[2], y[2]))), ]
  }
  return(res)
}
plottest <- function(x1, y1)
{
  plot(c(x1[1], y1[1]), c(x1[2], y1[2]),
       xlim = c(-2, 2), ylim = c(-2, 2), col = 2:3, pch = 20, cex = 2, asp = 1)
  lines(circleFun(x1, y1))
}
par(mfrow=c(2,2))
plottest(c( 1,-1),c(-1, 1))
plottest(c(-1, 1),c( 1,-1))
plottest(c(-1,-1),c( 1, 1))
plottest(c( 1, 1),c(-1,-1))
Result:
Question: I cannot figure out why the lines function closes the path in Figures [1,1] and [1,2] while it does not in Figures [2,1] and [2,2]. The expected result is that all figures look like those in the second row.
Thank you!
As the others said, lines() simply connects the points in the order they are given. That being said, here is a much simpler version of your function, with your expected output:
circleFun <- function(x, y) {
  center <- (x + y) / 2
  radius <- sqrt(sum((x - y)^2)) / 2
  angle <- atan2((y - x)[2], (y - x)[1])
  direc <- ifelse(abs(angle) > pi / 2, -1, 1)
  tt <- seq(0, direc * pi, length.out = 1000)
  return(data.frame(x = center[1] + radius * cos(angle + tt),
                    y = center[2] + radius * sin(angle + tt)))
}
where the direc variable is what decides whether to draw the semi-circle clockwise or counter-clockwise.
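Re-running the demo from the question with this circleFun should now give an open semi-circle in all four panels:
par(mfrow = c(2, 2))
plottest(c( 1, -1), c(-1,  1))
plottest(c(-1,  1), c( 1, -1))
plottest(c(-1, -1), c( 1,  1))
plottest(c( 1,  1), c(-1, -1))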
I can answer your question about the lines function, but I will leave it to you to figure out how to fix your circleFun to produce the expected behavior:
lines() connects points in the order in which they appear in the data. Also, the path is closed only when the first point is included again at the end of the data. The following figure illustrates this behavior.
par(mfrow=c(1, 2))
plot(x=c(-1, 0, 1), y=c(-1, 1, -1), xlim=c(-2, 2), ylim=c(-2, 2),
type="l", asp=1)
points(x=c(-1, 1), y=c(-1, -1))
plot(x=c(-1, 0, 1, -1), y=c(-1, 1, -1, -1), xlim=c(-2, 2), ylim=c(-2, 2),
type="l", asp=1)
points(x=c(-1, 1), y=c(-1, -1))

How to get something like Matplotlib's symlog scale in ggplot or lattice?

For very heavy-tailed data of both positive and negative sign, I sometimes like to see all the data on a plot without hiding structure in the unit interval.
When plotting with Matplotlib in Python, I can achieve this by selecting a symlog scale, which uses a logarithmic transform outside some interval, and linear plotting inside it.
Previously in R I have constructed similar behavior by transforming the data with an arcsinh on a one-off basis. However, tick labels and the like are very tricky to do right (see below).
Now, I am faced with a bunch of data where the subsetting in lattice or ggplot would be highly convenient. I don't want to use Matplotlib because of the subsetting, but I sure am missing symlog!
Edit:
I see that ggplot uses a package called scales, which solves a lot of this problem (if it works). Automatically choosing tick mark and label placing still looks pretty hard to do nicely though. Some combination of log_breaks and cbreaks perhaps?
Edit 2:
The following code is not too bad:
library(scales)  # for log_breaks() and trans_new()
sinh.scaled <- function(x, scale = 1) { sinh(x) * scale }
asinh.scaled <- function(x, scale = 1) { asinh(x / scale) }
asinh_breaks <- function(n = 5, scale = 1, base = 10)
{
  function(x) {
    log_breaks.callable <- log_breaks(n = n, base = base)
    rng <- range(x, na.rm = TRUE)
    minx <- floor(rng[1])
    maxx <- ceiling(rng[2])
    if (maxx == minx)
      return(sinh.scaled(minx, scale = scale))
    big.vals <- 0
    if (minx < (-scale)) {
      big.vals <- big.vals + 1
    }
    if (maxx > scale) {
      big.vals <- big.vals + 1
    }
    brk <- c()
    if (minx < (-scale)) {
      rbrk <- log_breaks.callable(c(-min(maxx, -scale), -minx))
      rbrk <- -rev(rbrk)
      brk <- c(brk, rbrk)
    }
    if (!(minx > scale | maxx < (-scale))) {
      rng <- c(max(minx, -scale), min(maxx, scale))
      minc <- floor(rng[1])
      maxc <- ceiling(rng[2])
      by <- floor((maxc - minc) / (n - big.vals)) + 1
      cb <- seq(minc, maxc, by = by)
      brk <- c(brk, cb)
    }
    if (maxx > scale) {
      brk <- c(brk, log_breaks.callable(c(max(minx, scale), maxx)))
    }
    brk
  }
}
asinh_trans <- function(scale = 1) {
  trans <- function(x) asinh.scaled(x, scale)
  inv <- function(x) sinh.scaled(x, scale)
  trans_new(paste0("asinh-", format(scale)), trans, inv,
            asinh_breaks(scale = scale),
            domain = c(-Inf, Inf))
}
A solution based on the package scales and inspired by Brian Diggs' post mentioned by @Dennis:
library(scales)  # for pretty_breaks(), log_breaks() and trans_new()
symlog_trans <- function(base = 10, thr = 1, scale = 1) {
  trans <- function(x)
    ifelse(abs(x) < thr, x, sign(x) *
             (thr + scale * suppressWarnings(log(sign(x) * x / thr, base))))
  inv <- function(x)
    ifelse(abs(x) < thr, x, sign(x) *
             base^((sign(x) * x - thr) / scale) * thr)
  breaks <- function(x) {
    sgn <- sign(x[which.max(abs(x))])
    if (all(abs(x) < thr))
      pretty_breaks()(x)
    else if (prod(x) >= 0) {
      if (min(abs(x)) < thr)
        sgn * unique(c(pretty_breaks()(c(min(abs(x)), thr)),
                       log_breaks(base)(c(max(abs(x)), thr))))
      else
        sgn * log_breaks(base)(sgn * x)
    } else {
      if (min(abs(x)) < thr)
        unique(c(sgn * log_breaks()(c(max(abs(x)), thr)),
                 pretty_breaks()(c(sgn * thr, x[which.min(abs(x))]))))
      else
        unique(c(-log_breaks(base)(c(thr, -x[1])),
                 pretty_breaks()(c(-thr, thr)),
                 log_breaks(base)(c(thr, x[2]))))
    }
  }
  trans_new(paste("symlog", thr, base, scale, sep = "-"), trans, inv, breaks)
}
I am not sure whether the impact of the scale parameter is the same as in Python, but here are a couple of comparisons (see the Python version here):
library(ggplot2)
data <- data.frame(x = seq(-50, 50, 0.01), y = seq(0, 100, 0.01))
data$y2 <- sin(data$x / 3)
# symlog x
ggplot(data, aes(x, y)) + geom_line() + theme_bw() +
  scale_x_continuous(trans = symlog_trans())
# symlog y
ggplot(data, aes(y, x)) + geom_line() + theme_bw() +
  scale_y_continuous(trans = "symlog")
# symlog both, threshold = 0.015 for y
# not too pretty because of too many breaks in a short interval
ggplot(data, aes(x, y2)) + geom_line() + theme_bw() +
  scale_y_continuous(trans = symlog_trans(thr = 0.015)) +
  scale_x_continuous(trans = "symlog")
# Again symlog both, threshold = 0.15 for y
ggplot(data, aes(x, y2)) + geom_line() + theme_bw() +
  scale_y_continuous(trans = symlog_trans(thr = 0.15)) +
  scale_x_continuous(trans = "symlog")
