Plot Sphere with custom gridlines in R

I would like to plot a sphere in R with the gridlines on the surface corresponding to the equal-area gridding of the sphere using the arccos transformation.
I have been experimenting with the R package rgl and got some help from this question:
Plot points on a sphere in R
which plots the gridlines with equal lat/long spacing.
I have the function below, which returns a data frame of the crossover points of the gridlines I want, but I am not sure how to proceed.
plot_sphere <- function(theta_num, phi_num){
  theta <- seq(0, 2*pi, (2*pi)/(theta_num))
  phi <- seq(0, pi, pi/(phi_num))
  tmp <- seq(0, 2*phi_num, 2)/phi_num
  phi <- acos(1 - tmp)
  tmp <- cbind(rep(seq(1, theta_num), each = phi_num),
               rep(seq(1, phi_num), times = theta_num))
  results <- as.data.frame(cbind(theta[tmp[,1]], phi[tmp[,2]]))
  names(results) <- c("theta", "phi")
  results$x <- cos(results$theta)*sin(results$phi)
  results$y <- sin(results$theta)*sin(results$phi)
  results$z <- cos(results$phi)
  return(results)
}
sphere <- plot_sphere(10, 10)
Can anyone help? In general I am finding the rgl functions tricky to work with.

If you use lines3d or plot3d(..., type="l"), you'll get a plot joining the points in your dataframe. To get breaks (you don't want one long line), add rows containing NA values.
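For instance, a minimal sketch of the NA-break idea (the coordinates here are made up purely for illustration):

library(rgl)
# two separate segments: the row of NAs breaks the polyline
pts <- rbind(c(0, 0, 0), c(1, 0, 0),
             c(NA, NA, NA),
             c(0, 1, 0), c(0, 1, 1))
lines3d(pts)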
The code in your plot_sphere function seems really messed up (you compute phi twice, you don't generate vectors of the requested length, etc.), but this function based on it works:
plot_sphere <- function(theta_num, phi_num){
  theta0 <- seq(0, 2*pi, len = theta_num)
  tmp <- seq(0, 2, len = phi_num)
  phi0 <- acos(1 - tmp)
  # indexing one past the end of phi0 (or theta0 below) gives NA,
  # which is what breaks the line between gridlines
  i <- seq(1, (phi_num + 1)*theta_num) - 1
  theta <- theta0[i %/% (phi_num + 1) + 1]
  phi <- phi0[i %% (phi_num + 1) + 1]
  i <- seq(1, phi_num*(theta_num + 1)) - 1
  theta <- c(theta, theta0[i %% (theta_num + 1) + 1])
  phi <- c(phi, phi0[i %/% (theta_num + 1) + 1])
  results <- data.frame(x = cos(theta)*sin(phi),
                        y = sin(theta)*sin(phi),
                        z = cos(phi))
  lines3d(results)
}
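A usage sketch (assuming the corrected function above is assigned to plot_sphere; the sphere radius and colours are arbitrary choices):

library(rgl)
open3d()
# draw a slightly smaller translucent sphere so the gridlines stay visible
spheres3d(0, 0, 0, radius = 0.99, color = "grey85", alpha = 0.6)
plot_sphere(20, 10)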

Related

Loop coordinates of intersected line and an outline - R

Based on this answer: How to get the coordinates of an intersected line with an outline - R, I tried to run a loop using the script below. Any idea why I cannot plot all the intersection points and lines? The shape is different from the one in the answer given.
Code:
library(ggplot2)
library(sf)

t <- seq(0, 2*pi, by = 0.1)
df <- data.frame(x = 13*sin(t)^3,
                 y = 4*cos(t) - 2*cos(3*t) - 5*cos(4*t) - cos(2*t))
df <- rbind(df, df[1,]) # close the polygon
meanX <- mean(df$x)
meanY <- mean(df$y)

# Transform your data.frame in a sf polygon (the first and last points
# must have the same coordinates)
#> Linking to GEOS 3.5.1, GDAL 2.1.3, proj.4 4.9.2
poly <- st_sf(st_sfc(st_polygon(list(as.matrix(df)))))

# Choose the angle (in degrees)
rotAngles <- 5

for (angle in seq(0, 359, rotAngles)) {
  # Find the minimum length for the line segment to be always
  # outside the cloud whatever the chosen angle
  maxX <- max(abs(abs(df[,"x"]) - abs(meanX)))
  maxY <- max(abs(abs(df[,"y"]) - abs(meanY)))
  line_length <- sqrt(maxX^2 + maxY^2) + 1

  # Find the coordinates of the 2 points to draw a line with
  # the intended angle.
  # This is the gray line on the graph below
  line <- rbind(c(meanX, meanY),
                c(meanX + line_length * cos((pi/180)*angle),
                  meanY + line_length * sin((pi/180)*angle)))

  # Transform into a sf line object
  line <- st_sf(st_sfc(st_linestring(line)))

  # Intersect the polygon and line. The result is a two points line
  # shown in black on the plot below
  intersect_line <- st_intersection(poly, line)

  # Extract only the second point of this line.
  # This is the intersecting point
  intersect_point <- st_coordinates(intersect_line)[2, c("X","Y")]

  # Visualise this with ggplot and without geom_sf;
  # you first need to transform the lines back into data.frames
  line <- as.data.frame(st_coordinates(line))[, 1:2]
  intersect_line <- as.data.frame(st_coordinates(intersect_line))[, 1:2]

  ggplot() + geom_path(data = df, aes(x = x, y = y)) +
    geom_line(data = line, aes(x = X, y = Y), color = "gray80", lwd = 3) +
    geom_line(data = intersect_line, aes(x = X, y = Y), color = "gray20", lwd = 1) +
    geom_point(aes(meanX, meanY), colour = "orangered", size = 2) +
    geom_point(aes(intersect_point["X"], intersect_point["Y"]),
               colour = "orangered", size = 2) +
    theme_bw()
}
First we'll go back to @Gilles' polygon shape as it is more consistent with his reasoning and presentation:
# Generate a heart shape
t <- seq(0, 2*pi, by = 0.1)
df <- data.frame(x = 16*sin(t)^3,
                 y = 13*cos(t) - 5*cos(2*t) - 2*cos(3*t) - cos(4*t))
df <- rbind(df, df[1,]) # close the polygon
meanX <- mean(df$x)
meanY <- mean(df$y)

library(sf)
poly <- st_sf(st_sfc(st_polygon(list(as.matrix(df)))))
These elements don't change and don't need to be calculated multiple times inside a loop:
maxX <- max(abs(abs(df[,"x"]) - abs(meanX)))
maxY <- max(abs(abs(df[,"y"]) - abs(meanY)))
line_length = sqrt(maxX^2 + maxY^2) + 1
Then your rotAngle and angle:
rotAngle <- 5
angle <- seq(0, 359, rotAngle)
Focusing attention on the for loop: the first line call has elements that do and don't change. Let's make an empty list to hold our results, created outside the for loop, that will hold 2x2 matrices:
line_lst <- list()
for (j in 1:length(angle)) {
  line_lst[[j]] <- matrix(nrow = 2, ncol = 2)
  line_lst[[j]][1,1] <- meanX
  line_lst[[j]][1,2] <- meanY
  line_lst[[j]][2,1] <- meanX + line_length * cos((pi/180)*angle[j])
  line_lst[[j]][2,2] <- meanY + line_length * sin((pi/180)*angle[j])
}
line_lst[[1]]
             [,1]       [,2]
[1,] 1.225402e-06 0.09131118
[2,] 2.425684e+01 0.09131118

line_lst[[72]]
             [,1]        [,2]
[1,] 1.225402e-06  0.09131118
[2,] 2.416454e+01 -2.02281169
Those seem reasonable, and this was mainly what I wanted to show: index both the LHS[[j]] and the RHS[j], with j tracking which iteration of 1:length(angle) we're on. On to the linestring, intersection, and points; same idea, make an empty receiver and loop through:
# here we have a mismatch: an `i` counter is established but
# `j` is used for indexing; they look close enough to tired eyes
# this will result in NULL(s)
linestring_lst <- list()
for (i in 1:length(line_lst)) { # this causes a future error
  linestring_lst[[j]] <- st_sf(st_sfc(st_linestring(line_lst[[j]])))
}

# simply keeping our accounting right, using all `i` or all `j`,
# or staying away from things that look alike and using `k` here
for (k in 1:length(line_lst)) {
  linestring_lst[[k]] <- st_sf(st_sfc(st_linestring(line_lst[[k]])))
}

intersection_lst <- list()
for (j in 1:length(linestring_lst)) {
  intersection_lst[[j]] <- st_intersection(poly, linestring_lst[[j]])
}

intersect_points <- list()
for (j in 1:length(intersection_lst)) {
  intersect_points[[j]] <- st_coordinates(intersection_lst[[j]])[2, c('X','Y')]
}
The things to remember here about for loops: create your receiver objects outside the loop, and index both the LHS[j] and the RHS[j] (`[` for vector-like receivers, `[[` for lists). Having done each of these independently, you can put it all in one for loop, as sketched below.
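For example, a compact sketch of such a combined loop (reusing poly, angle, meanX, meanY and line_length from above):

intersect_points <- vector("list", length(angle))
for (j in seq_along(angle)) {
  seg <- rbind(c(meanX, meanY),
               c(meanX + line_length * cos((pi/180) * angle[j]),
                 meanY + line_length * sin((pi/180) * angle[j])))
  seg_sf <- st_sf(st_sfc(st_linestring(seg)))
  intersect_points[[j]] <- st_coordinates(st_intersection(poly, seg_sf))[2, c("X", "Y")]
}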
And the final step: take the lists to data.frame(s) for use in ggplot.
intersect_pts_df <- as.data.frame(do.call('rbind', intersect_points))
head(intersect_pts_df, n = 3)
         X          Y
1 14.96993 0.09131118
2 15.56797 1.45333163
3 15.87039 2.88968964
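From there, a plotting sketch (the styling choices are arbitrary) overlaying the intersection points on the heart outline:

library(ggplot2)
ggplot() +
  geom_path(data = df, aes(x = x, y = y)) +
  geom_point(data = intersect_pts_df, aes(x = X, y = Y),
             colour = "orangered", size = 2) +
  theme_bw()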

Generating 3D data with cube as a decision surface

I am new to using the R programming language. I have a task to use R to create a function that simulates a standard normal distribution containing 500 observations and three variables, x, y, and z.
I am to use a cube as a decision surface to categorize observations based on whether they fall within or outside the cube.
Below is my code. I am able to plot the 3D data, but I am not sure how to categorize the data into two classes.
library(scatterplot3d)

set.seed(1234)
nObs <- 500
x <- matrix(rnorm(1.25*nObs), ncol = 2)
y <- matrix(rnorm(1.25*nObs), ncol = 2)
z <- matrix(rnorm(1.25*nObs), ncol = 2)

mSample <- function(nObs, x, y, z){
  x1 <- rnorm(1, x)
  x1[y == 1, ] <- x[y == 1, ] + 1
  mSample <- as_tibble(rbind(mvnfast::rmvn(x, y = y1, z = z1), mvnfast::rmvn(x, y = y1, z = z1)))
  mSample[1:x1, 1.25] <- 0
  mSample[(x1 + 1):(x1 + 1), 1.25] <- 1
  mSample <- mSample[sample(nrow(mSample)), ]
  colnames(mSample <- c("x", "y", "class"))
  mSample
}

spl <- scatterplot3d(x, y, z)
spl <- scatterplot3d(x, y, z, pch = 16, highlight.3d = TRUE)
I had a similar question to this recently. Basically, to know if a given point is inside or outside of a cube, first you need to know the length of the cube.
Then, simply iterate over all the points (nObs) and do an if statement:
if (x > -cubeLength && x < cubeLength &&
    y > -cubeLength && y < cubeLength &&
    z > -cubeLength && z < cubeLength) {
  # classify positive
} else {
  # classify negative
}
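A vectorized sketch of the same idea (an assumption here: the cube is centred at the origin and cubeLength is treated as its half-width; pts is a hypothetical nObs x 3 matrix with x, y, z columns):

set.seed(1234)
nObs <- 500
pts <- matrix(rnorm(3 * nObs), ncol = 3,
              dimnames = list(NULL, c("x", "y", "z")))
cubeLength <- 1  # half-width of the cube; an arbitrary choice here
inside <- apply(abs(pts) < cubeLength, 1, all)
cls <- ifelse(inside, "inside", "outside")
table(cls)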

Turning Angles of Sequential Vectors: Complex Plane

I am trying to compute the turning angles of sequential vectors in the complex plane. Please see the code below for a demo data frame and my attempt at calculating the angles.
The sign of the angles seems correct: left turns are positive and right turns are negative. However, the turning angles do not look right when I reference the plot. NOTE: I want the turning angles, not the angles between the vectors. Image for reference:
set.seed(123)

# Generate a random path and plot it
path.short.random <- function(points = 6) {
  x <- runif(points, -1, 1)
  y <- rnorm(points, 0, 0.25)
  i <- order(x, y)
  x <- x[i]
  y <- y[i]
  path <- data.frame(x = x, y = y)
  plot(x, y, main = "Random Path", asp = 1)
  # draw arrows from point to point
  s <- seq(length(x) - 1) # one shorter than data
  arrows(x[s], y[s], x[s + 1], y[s + 1], col = 1:points)
  path
}

# Save the path as a data frame
df <- path.short.random()

# Compute sequential turning angles
get.angles <- function(df) {
  df$polar <- complex(real = df$x, imaginary = df$y)
  df$displacement <- c(0, diff(df$polar))
  diff(Arg(df$displacement[2:nrow(df)]))
}
get.angles(df)
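One thing worth checking (a sketch, not a full answer): differences of Arg() values can fall outside (-pi, pi], so turning angles are commonly wrapped back into that interval, e.g.:

# wrap raw angle differences into (-pi, pi]
wrap_angle <- function(a) Arg(exp(1i * a))
wrap_angle(get.angles(df))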

How to clip an isosurface to a ball?

Consider the Togliatti implicit surface. I want to clip it to the ball centered at the origin with radius 4.8. A solution, with the misc3d package, consists in using the mask argument of the computeContour3d function, which allows one to use only the points satisfying x^2+y^2+z^2 < 4.8^2:
library(misc3d)

# Togliatti surface equation: f(x,y,z) = 0
f <- function(x, y, z){
  w <- 1
  64*(x-w)*
    (x^4-4*x^3*w-10*x^2*y^2-4*x^2*w^2+16*x*w^3-20*x*y^2*w+5*y^4+16*w^4-20*y^2*w^2) -
    5*sqrt(5-sqrt(5))*(2*z-sqrt(5-sqrt(5))*w)*(4*(x^2+y^2-z^2)+(1+3*sqrt(5))*w^2)^2
}

# make grid
nx <- 220; ny <- 220; nz <- 220
x <- seq(-5, 5, length = nx)
y <- seq(-5, 5, length = ny)
z <- seq(-4, 4, length = nz)
g <- expand.grid(x = x, y = y, z = z)

# calculate voxel
voxel <- array(with(g, f(x, y, z)), dim = c(nx, ny, nz))

# mask: keep points satisfying x^2+y^2+z^2 < 4.8^2, in order to
# clip the surface to the ball of radius 4.8
mask <- array(with(g, x^2 + y^2 + z^2 < 4.8^2), dim = c(nx, ny, nz))

# compute isosurface
surf <- computeContour3d(voxel, maxvol = max(voxel), level = 0, mask = mask, x = x, y = y, z = z)

# draw isosurface
drawScene.rgl(makeTriangles(surf, smooth = TRUE))
But the borders of the resulting surface are irregular:
How to get regular, smooth borders?
The solution I found resorts to spherical coordinates. It consists in defining the function f in terms of spherical coordinates (ρ, θ, ϕ), computing the isosurface with ρ running from 0 to the desired radius, and then transforming the result to Cartesian coordinates:
# Togliatti surface equation with spherical coordinates
f <- function(ρ, θ, ϕ){
  w <- 1
  x <- ρ*cos(θ)*sin(ϕ)
  y <- ρ*sin(θ)*sin(ϕ)
  z <- ρ*cos(ϕ)
  64*(x-w)*
    (x^4-4*x^3*w-10*x^2*y^2-4*x^2*w^2+16*x*w^3-20*x*y^2*w+5*y^4+16*w^4-20*y^2*w^2) -
    5*sqrt(5-sqrt(5))*(2*z-sqrt(5-sqrt(5))*w)*(4*(x^2+y^2-z^2)+(1+3*sqrt(5))*w^2)^2
}

# make grid
nρ <- 300; nθ <- 400; nϕ <- 300
ρ <- seq(0, 4.8, length = nρ) # ρ runs from 0 to the desired radius
θ <- seq(0, 2*pi, length = nθ)
ϕ <- seq(0, pi, length = nϕ)
g <- expand.grid(ρ = ρ, θ = θ, ϕ = ϕ)

# calculate voxel
voxel <- array(with(g, f(ρ, θ, ϕ)), dim = c(nρ, nθ, nϕ))

# calculate isosurface
surf <- computeContour3d(voxel, maxvol = max(voxel), level = 0, x = ρ, y = θ, z = ϕ)

# transform to Cartesian coordinates
surf <- t(apply(surf, 1, function(rtp){
  ρ <- rtp[1]; θ <- rtp[2]; ϕ <- rtp[3]
  c(
    ρ*cos(θ)*sin(ϕ),
    ρ*sin(θ)*sin(ϕ),
    ρ*cos(ϕ)
  )
}))

# draw isosurface
drawScene.rgl(makeTriangles(surf, smooth = TRUE, color = "violetred"))
Now the resulting surface has regular, smooth borders:
Your solution is excellent for the problem you stated, because spherical coordinates are so natural for that boundary. However, here is a more general solution that would work for other smooth boundaries.
The idea is to allow input of a boundary function, and cull points when they are too large or too small. In your case it would be the squared distance from the origin, and you would want to cull points where the value is bigger than 4.8^2. But sometimes the triangles being drawn to make the smooth surface should only be partially culled: one point would be kept and two deleted, or two kept and one deleted. If you cull the whole triangle, that leads to the jagged edges in your original plot.
To fix this, the points can be modified. If only one is supposed to be kept, then the other two points can be shrunk towards it until they lie on an approximation to the boundary. If two are supposed to be kept you want the shape to be a quadrilateral, so you would build that out of two triangles.
This function does that, assuming the input surf is the output of computeContour3d:
boundSurface <- function(surf, boundFn, bound = 0, greater = TRUE) {
  # Surf is n x 3: each row is a point, triplets are triangles
  values <- matrix(boundFn(surf) - bound, 3)
  # values is (m = n/3) x 3: each row is the boundFn value at one point
  # of a triangle
  if (!greater)
    values <- -values
  keep <- values >= 0
  # counts is m vector counting number of points to keep in each triangle
  counts <- apply(keep, 2, sum)
  # result is initialized to an empty array
  result <- matrix(nrow = 0, ncol = 3)
  # singles is set to all the rows of surf where exactly one
  # point in the triangle is kept, say s x 3
  singles <- surf[rep(counts == 1, each = 3),]
  if (length(singles)) {
    # singleValues is a subset of values where only one vertex is kept
    singleValues <- values[, counts == 1]
    singleIndex <- 3*col(singleValues) + 1:3 - 3
    # good is the index of the vertex to keep, bad are those to fix
    good <- apply(singleValues, 2, function(col) which(col >= 0))
    bad <- apply(singleValues, 2, function(col) which(col < 0))
    for (j in 1:ncol(singleValues)) {
      goodval <- singleValues[good[j], j]
      for (i in 1:2) {
        badval <- singleValues[bad[i,j], j]
        alpha <- goodval/(goodval - badval)
        singles[singleIndex[bad[i,j], j], ] <-
          (1-alpha)*singles[singleIndex[good[j], j],] +
          alpha *singles[singleIndex[bad[i,j], j],]
      }
    }
    result <- rbind(result, singles)
  }
  doubles <- surf[rep(counts == 2, each = 3),]
  if (length(doubles)) {
    # doubleValues is a subset of values where two vertices are kept
    doubleValues <- values[, counts == 2]
    doubleIndex <- 3*col(doubleValues) + 1:3 - 3
    doubles2 <- doubles
    # good are the indices of the vertices to keep, bad is the one to fix
    good <- apply(doubleValues, 2, function(col) which(col >= 0))
    bad <- apply(doubleValues, 2, function(col) which(col < 0))
    newvert <- matrix(NA, 2, 3)
    for (j in 1:ncol(doubleValues)) {
      badval <- doubleValues[bad[j], j]
      for (i in 1:2) {
        goodval <- doubleValues[good[i,j], j]
        alpha <- goodval/(goodval - badval)
        newvert[i,] <-
          (1-alpha)*doubles[doubleIndex[good[i,j], j],] +
          alpha *doubles[doubleIndex[bad[j], j],]
      }
      doubles[doubleIndex[bad[j], j],] <- newvert[1,]
      doubles2[doubleIndex[good[1,j], j],] <- newvert[1,]
      doubles2[doubleIndex[bad[j], j],] <- newvert[2,]
    }
    result <- rbind(result, doubles, doubles2)
  }
  # Finally add all the rows of surf where the whole
  # triangle is kept
  rbind(result, surf[rep(counts == 3, each = 3),])
}
You would use it after computeContour3d and before makeTriangles, e.g.
fn <- function(x) {
  apply(x^2, 1, sum)
}
drawScene.rgl(makeTriangles(boundSurface(surf, fn, bound = 4.8^2,
                                         greater = FALSE),
                            smooth = TRUE))
Here's the output I see:
It's not quite as good as yours, but it would work for many different boundary functions.
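For instance, a sketch of clipping the same surf to an axis-aligned box instead of a ball (the half-width of 4 is an arbitrary choice):

# Chebyshev distance: the largest absolute coordinate of each vertex
boxFn <- function(x) apply(abs(x), 1, max)
drawScene.rgl(makeTriangles(boundSurface(surf, boxFn, bound = 4, greater = FALSE),
                            smooth = TRUE))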
Edited to add: Version 0.100.26 of rgl now has a function clipMesh3d which incorporates these ideas.
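A rough, untested sketch of that route, assuming clipMesh3d's fn argument accepts a function applied to the matrix of vertex coordinates (check ?clipMesh3d for the exact interface in your rgl version):

library(rgl)
# build a mesh3d from the n x 3 'surf' matrix (rows in triplets are triangles)
mesh <- tmesh3d(vertices = t(surf),
                indices = matrix(seq_len(nrow(surf)), nrow = 3),
                homogeneous = FALSE)
clipped <- clipMesh3d(mesh, fn = function(v) rowSums(v[, 1:3]^2),
                      bound = 4.8^2, greater = FALSE)
shade3d(clipped, color = "violetred")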

ggplot2 - Modify geom_density2d to accept weights as a parameter?

This is my first post to the R community, so pardon me if it is silly. I would like to use the functions geom_density2d and stat_density2d in ggplot2 to plot kernel density estimates, but the problem is that they can't handle weighted data. From what I understand, these two functions call the function kde2d from the package MASS to make the kernel density estimate, and kde2d doesn't take data weights as a parameter.
Now, I have found this altered version of kde2d http://www.inside-r.org/node/226757, which takes weights as a parameter and is based on the source code of kde2d. The code of this function is:
kde2d.weighted <- function(x, y, w, h, n = 25, lims = c(range(x), range(y))) {
  nx <- length(x)
  if (length(y) != nx)
    stop("data vectors must be the same length")
  if (length(w) != nx & length(w) != 1)
    stop("weight vectors must be 1 or length of data")
  gx <- seq(lims[1], lims[2], length = n) # gridpoints x
  gy <- seq(lims[3], lims[4], length = n) # gridpoints y
  if (missing(h))
    h <- c(bandwidth.nrd(x), bandwidth.nrd(y))
  if (missing(w))
    w <- numeric(nx) + 1
  h <- h/4
  ax <- outer(gx, x, "-")/h[1] # distance of each point to each grid point in x-direction
  ay <- outer(gy, y, "-")/h[2] # distance of each point to each grid point in y-direction
  z <- (matrix(rep(w, n), nrow = n, ncol = nx, byrow = TRUE)*matrix(dnorm(ax), n, nx)) %*%
    t(matrix(dnorm(ay), n, nx))/(sum(w) * h[1] * h[2]) # z is the density
  return(list(x = gx, y = gy, z = z))
}
I would like to make the functions geom_density2d and stat_density2d call kde2d.weighted instead of kde2d, and thereby make them accept weighted data.
I have never changed any functions in existing R packages, so my question is: what is the easiest way of doing this?
You can actually pass your own density data to geom_contour, which is probably the easiest approach. Let's start with a sample dataset by adding weights to the geyser data.
library("MASS")
data(geyser, package = "MASS")
geyserw <- transform(geyser,
                     weight = sample(1:5, nrow(geyser), replace = TRUE))
Now we use your weighted function to calculate the density and turn it into a data.frame
dens <- kde2d.weighted(geyserw$duration, geyserw$waiting, geyserw$weight)
dfdens <- data.frame(expand.grid(x=dens$x, y=dens$y), z=as.vector(dens$z))
Now we plot the data
library(ggplot2)
ggplot(geyserw, aes(x = duration, y = waiting)) +
  geom_point() + xlim(0.5, 6) + ylim(40, 110) +
  geom_contour(aes(x = x, y = y, z = z), data = dfdens)
And that should do it
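If a filled version is preferred, the same weighted-density data frame should also work with geom_contour_filled (a sketch, assuming ggplot2 >= 3.3.0):

ggplot(dfdens, aes(x = x, y = y, z = z)) +
  geom_contour_filled(show.legend = FALSE) +
  geom_point(data = geyserw, aes(x = duration, y = waiting), inherit.aes = FALSE)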
