Generating a sequence of equidistant points on polygon boundary - r

I am looking for a procedure that allows me to generate a sequence of equidistant points (coordinates) along the sides of an arbitrary polygon.
Imagine a polygon defined by the coordinates of its vertices:
poly.mat <- matrix(c(0,0,
0,1,
0.5,1.5,
0.5,0,
0,0 # last row included to close the polygon
), byrow = T, ncol = 2)
colnames(poly.mat) <- c("x", "y")
plot(poly.mat, type = "l")
If the length of the sequence I want to generate is n (adjustable), how can I produce a sequence of equidistant coordinates, starting at (0,0)?
I got as far as calculating the perimeter of the shape with the geosphere package (which I believe I need)
library(geosphere)
n <- 50 # sequence of length set to be 50
perim <- perimeter(poly.mat)
perim/n # looks like every section needs to be 8210.768 something in length
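# note: geosphere::perimeter() treats these x/y values as longitude/latitude in degrees,
# so the result is in metres on the globe rather than in the plot's planar units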

You will have to write the code yourself. Sorry, there isn't a library function for every last detail of every last assignment. Assuming that each pair of points defines a line segment, you could just generate N points along each segment, as in
begin <- c(xbegin, ybegin)
end <- c(xend, yend)
xdist <- (xend - xbegin) / nintervals
ydist <- (yend - ybegin) / nintervals
then your points are given by c(xbegin + i * xdist, ybegin + i * ydist) for i = 0, 1, ..., nintervals

Here is the solution I came up with.
pointDistance <- function(p1, p2){
  sqrt((p2[,1] - p1[,1])^2 + (p2[,2] - p1[,2])^2) # Euclidean distance between two rows of coordinates
}
getPos <- function(shp.mat, ll){
  greaterLL <- shp.mat$cumdis > ll
  if(all(greaterLL == FALSE)) return(shp.mat[nrow(shp.mat), c("x", "y")])
  smallRow <- min(which(greaterLL)) # the first vertex whose cumulative length exceeds ll
  p.start <- shp.mat[smallRow - 1, c("x", "y")]
  p.end <- shp.mat[smallRow, c("x", "y")]
  prop <- (ll - shp.mat$cumdis[smallRow - 1]) / (shp.mat$cumdis[smallRow] - shp.mat$cumdis[smallRow - 1])
  p.start + prop * (p.end - p.start)
}
# shp1
poly.mat <- matrix(c(0,0,
0,1,
0.5,1.5,
0.5,0,
0,0
),byrow = T, ncol = 2)
colnames(poly.mat) <- c("x", "y")
poly.mat <- as.data.frame(poly.mat)
# Main fun
pointsOnPath <- function(shp.mat, n){
  dist <- numeric(nrow(shp.mat)) # distance of each vertex to the previous one (0 for the first)
  for(i in 2:nrow(shp.mat)){
    dist[i] <- pointDistance(p1 = shp.mat[i, ], p2 = shp.mat[i - 1, ])
  }
  shp.mat$dist <- dist
  shp.mat$cumdis <- cumsum(shp.mat$dist)
  dis <- matrix(seq(from = 0, to = max(shp.mat$cumdis), length.out = n + 1), ncol = 1)
  out <- lapply(dis, function(x) getPos(shp.mat = shp.mat, ll = x))
  out <- do.call("rbind", out)
  out$dis <- dis
  out[-nrow(out), ]
}
df <- pointsOnPath(shp.mat = poly.mat, 5)
# Plot
plot(poly.mat$x, poly.mat$y, type = "l", xlim = c(0,1.5), ylim = c(0,1.5))
points(df$x, df$y, col = "red", lwd = 2)
There is room for improving the code, but it should return the correct result.
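As a side note, a similar result can be sketched with the sf package, which can place a given number of points at regular spacing along a linestring. A minimal sketch, assuming planar coordinates (note that st_line_sample() spaces the points regularly but does not necessarily start exactly at (0,0)):
library(sf)
# build a linestring from the closed polygon boundary defined above
boundary <- st_sfc(st_linestring(as.matrix(poly.mat[, c("x", "y")])))
# 50 regularly spaced points along the boundary
pts <- st_line_sample(boundary, n = 50, type = "regular")
st_coordinates(pts)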

Related

Draw a vector field from matrix multiplication r

I'm trying to plot a vector field based on a matrix multiplication. The problem is that the function producing the values for the matrix multiplication can only take a single number. When a range of numbers is passed to the all.p function, the output cannot be used in the matrix multiplication. Is there a way to change all.p so that, with multiple inputs, the matrix multiplication is still valid and the vector field can be computed? The code fails at the vectorfield function, because that function passes values in the range 0 to 1 but all.p can't take multiple inputs.
geno.fit = matrix(c(0.791,1.000,0.834,
0.670,1.006,0.901,
0.657,0.657,1.067),
nrow = 3,
ncol = 3,
byrow = T)
all.p <- function(p) {
if (length(p)>1) {
stop("More numbers in input than expected")
}
P = p^2
PQ = 2*p*(1-p)
Q = (1-p)^2
return(list=c(P=P,PQ=PQ,Q=Q))
}
library(pracma)
f <- function(x, y) all.p(x) %*% geno.fit %*% all.p(y)
xx <- c(0, 1); yy <- c(0, 1)
vectorfield(fun = f, xlim = xx, ylim = yy, scale = 0.1)
for (xs in seq(0, 1, by = 0.25)) {
sol <- rk4(f, 0, 1, xs, 100)
lines(sol$x, sol$y, col="darkgreen")
}
grid()
I also tried to use a for loop.
f <- function(x, y, n = 16) {
space3 = matrix(NA,nrow = n,ncol = n)
for (i in 1:(length(x))) {
for (j in 1:(length(y))) {
# Calculate mean fitness
space3[i,j] = all.p(x[i]) %*% geno.fit %*% all.p(y[j])
}
}
return(space3)
}
xx <- c(0, 1); yy <- c(0, 1)
f(seq(0,1,length.out = 16), seq(0,1,length.out = 16))
vectorfield(fun = f, xlim = xx, ylim = yy, scale = 0.1)
Below is the code to make the gradient ascent plot (without the vectors).
library(fields) # for image.plot
res = 0.01
seq.x = seq(0,1,by = res)
space = outer(seq.x,seq.x,"*")
pace2 = space
for (i in 1:length(seq.x)) {
for (j in 1:length(seq.x)) {
space[i,j] = all.p(1-seq.x[i]) %*% geno.fit %*% all.p(1-seq.x[j])
}
}
round(t(space),3)
new.space = t(space)
image.plot(new.space)
by.text = 8
for (i in seq(1,length(seq.x),by = by.text)) {
for (j in seq(1,length(seq.x),by = by.text)) {
text(seq.x[i],seq.x[j],
labels = round(new.space[i,j],4),
cex = new.space[i,j]/2,
col = "black")
}
}
contour(new.space,ylim=c(1,0),add = T, nlevels = 50)
I was able to make the vector field function work, but it's not showing what I was expecting from the previous gradient ascent vector field:
How can the two be reconciled? (i.e., plotting the vectors on the gradient ascent image, which would show the proper direction of the vectors for the steepest ascent)
Here is my solution:
library(fields) # for image.plot
library(plotly)
library(raster)
# Genotype fitness matrix -------------------------------------------------
geno.fit = matrix(c(0.791,1.000,0.834,
0.670,1.006,0.901,
0.657,0.657,1.067),
nrow = 3,
ncol = 3,
byrow = T)
# Resolution
res = 0.01
# Sequence of X
seq.x = seq(0,1,by = res)
# Make a matrix
space = outer(seq.x,seq.x,"*")
# Function to calculate the AVERAGE fitness for a given frequency of an allele to get the expected frequency of genotypes in a population
all.p <- function(p) { # Takes frequency of an allele in the population
if (length(p)>1) { # Has to be only 1 number
stop("More numbers in input than expected")
}
P = p^2 # Gets the AA
PQ = 2*p*(1-p) # gets the Aa
Q = (1-p)^2 # Gets the aa
return(list=c(P=P, # Return the values
PQ=PQ,
Q=Q))
}
# Examples
all.p(0)
all.p(1)
# Plot the matrix of all combinations of genotype frequencies
image.plot(space,
ylim=c(1.05,-0.05),
ylab= "Percentage of Chromosome EF of TD form",
xlab= "Percentage of Chromosome CD of BL form")
# Backup the data
space2 = space
# calculate the average fitness for EVERY combination of frequency of 2 genotypes
for (i in 1:length(seq.x)) {
for (j in 1:length(seq.x)) {
# Calculate mean fitness
space[i,j] = all.p(1-seq.x[i]) %*% geno.fit %*% all.p(1-seq.x[j])
}
}
# Show the result
round(t(space),3)
# Transform the space
new.space = t(space)
image.plot(new.space,
# ylim=c( 1.01,-0.01),
ylab= "Percentage of Chromosome EF of TD (Tidbinbilla) form",
xlab= "Percentage of Chromosome CD of BL (Blundell) form")
# Add the numbers to get a better sense of the average fitness values at each point
by.text = 8
for (i in seq(1,length(seq.x),by = by.text)) {
for (j in seq(1,length(seq.x),by = by.text)) {
text(seq.x[i],seq.x[j],
labels = round(new.space[i,j],4),
cex = new.space[i,j]/2,
col = "black") # col = "gray70"
}
}
# Add contour lines
contour(new.space,ylim=c(1,0),add = T, nlevels = 50)
# Plotly 3D graph --------------------------------------------------------
# To get the 3D plane in an INTERACTIVE graph
xyz=cbind(expand.grid(seq.x,
seq.x),
as.vector(new.space))
plot_ly(x = xyz[,1],y = xyz[,2],z = xyz[,3],
color = xyz[,3])
# Vector field on the Adaptive landscape ----------------------------------
library(tidyverse)
library(ggquiver)
raster2quiver <- function(rast, aggregate = 50, colours = terrain.colors(6), contour.breaks = 200)
{
names(rast) <- "z"
quiv <- aggregate(rast, aggregate)
terr <- terrain(quiv, opt = c('slope', 'aspect'))
quiv$u <- -terr$slope[] * sin(terr$aspect[])
quiv$v <- -terr$slope[] * cos(terr$aspect[])
quiv_df <- as.data.frame(quiv, xy = TRUE)
rast_df <- as.data.frame(rast, xy = TRUE)
print(ggplot(mapping = aes(x = x, y = y, fill = z)) +
geom_raster(data = rast_df, na.rm = TRUE) +
geom_contour(data = rast_df,
aes(z=z, color=..level..),
breaks = seq(0,3, length.out = contour.breaks),
size = 1.4)+
scale_color_gradient(low="blue", high="red")+
geom_quiver(data = quiv_df, aes(u = u, v = v), vecsize = 1.5) +
scale_fill_gradientn(colours = colours, na.value = "transparent") +
theme_bw())
return(quiv_df)
}
r <-raster(
space,
xmn=range(seq.x)[1], xmx=range(seq.x)[2],
ymn=range(seq.x)[1], ymx=range(seq.x)[2],
crs=CRS("+proj=utm +zone=11 +datum=NAD83")
)
# Draw the adaptive landscape
raster2quiver(rast = r, aggregate = 2, colours = tim.colors(100))
Not exactly what I wanted, but it does what I was looking for!
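As a follow-up on the original vectorisation question: a minimal sketch, assuming pracma::vectorfield() only needs fun to accept vectors of x and y values, is to wrap the scalar matrix product with Vectorize() (f_scalar and f_vec are names introduced here for illustration):
library(pracma)
# scalar version: %*% returns a 1x1 matrix, drop() turns it into a plain number
f_scalar <- function(x, y) drop(all.p(x) %*% geno.fit %*% all.p(y))
# element-wise wrapper, so f_vec(c(0.1, 0.2), c(0.3, 0.4)) returns a vector
f_vec <- Vectorize(f_scalar)
vectorfield(fun = f_vec, xlim = c(0, 1), ylim = c(0, 1), scale = 0.1)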

How to calculate fuzzy performance index and normalized classification entropy in R

I am running fuzzy c-means clustering using the e1071 package. I want to decide the optimum number of clusters based on the fuzzy performance index (FPI) (extent of fuzziness) and the normalized classification entropy (NCE) (degree of disorganization of a specific class), given by the following formulas
FPI = 1 - (c / (c - 1)) * (1 - sum_ik(mu_ik^2) / n)
NCE = (n / (n - c)) * (-sum_ik(mu_ik * log(mu_ik)) / n)
where c is the number of clusters, n is the number of observations, mu_ik is the fuzzy membership, and log is the natural logarithm.
I am using the following code
library(e1071)
x <- rbind(matrix(rnorm(100,sd=0.3),ncol=2),
matrix(rnorm(100,mean=1,sd=0.3),ncol=2))
cl <- cmeans(x,2,20,verbose=TRUE,method="cmeans")
cl$membership
I have been able to extract the mu_ik, i.e. the fuzzy memberships. Now cmeans has to be run for different numbers of clusters, e.g. 2 to 6, and the FPI and NCE have to be calculated so as to produce a plot like the following
How can it be achieved in R?
Edit
I have tried the code provided by @nya for the iris dataset, using the following code
df <- scale(iris[-5])
FPI <- function(cmem){
c <- ncol(cmem)
n <- nrow(cmem)
1 - (c / (c - 1)) * (1 - sum(cmem^2) / n)
}
NCE <- function(cmem){
c <- ncol(cmem)
n <- nrow(cmem)
(n / (n - c)) * (- sum(cmem * log(cmem)) / n)
}
# prepare variables
cl <- list()
fpi <- nce <- NULL
# cycle through the desired number of clusters
for(i in 2:6){
cl[[i]] <- cmeans(df, i, 20, method = "cmeans")
fpi <- c(fpi, FPI(cl[[i]]$membership))
nce <- c(nce, NCE(cl[[i]]$membership))
}
# add space for the second axis label
par(mar = c(5,4,1,4) + .1)
# plot FPI
plot(2:6, fpi, lty = 2, pch = 18, type = "b", xlab = "Number of clusters", ylab = "FPI")
# plot NCE, manually adding the second axis
par(new = TRUE)
plot(2:6, nce, lty = 1, pch = 15, type = "b", xlab = "", ylab = "", axes = FALSE)
axis(4, at = pretty(range(nce)))
mtext("NCE", side = 4, line = 3)
# add legend
legend("top", legend = c("FPI", "NCE"), pch = c(18,15), lty = c(2,1), horiz = TRUE)
The minimum values of the fuzzy performance index (FPI) and normalized classification entropy (NCE) were considered to decide the optimum number of clusters. Here NCE is always increasing and FPI is always decreasing. Ideally it should have looked like the expected plot above.
With the available equations, we can program our own functions. Here, the two functions use equations present in the paper you suggested and in one of the references the authors cite.
FPI <- function(cmem, method = c("FuzME", "McBrathney", "Rahul")){
  method = match.arg(method)
  C <- ncol(cmem)
  N <- nrow(cmem)
  # Rahul et al. 2019. https://doi.org/10.1080/03650340.2019.1578345
  if(method == "Rahul"){
    res <- 1 - (C / (C - 1)) * (1 - sum(cmem^2) / N)
  }
  # McBrathney & Moore 1985 https://doi.org/10.1016/0168-1923(85)90082-6
  if(method == "McBrathney"){
    F <- sum(cmem^2) / N
    res <- 1 - (C * F - 1) / (F - 1)
  }
  # FuzME https://precision-agriculture.sydney.edu.au/resources/software/
  # MATLAB code file fvalidity.m, downloaded on 11 Nov, 2021
  if(method == "FuzME"){
    F <- sum(cmem^2) / N
    res <- 1 - (C * F - 1) / (C - 1)
  }
  return(res)
}
NCE <- function(cmem, method = c("FuzME", "McBrathney", "Rahul")){
  method = match.arg(method)
  C <- ncol(cmem)
  N <- nrow(cmem)
  if(method == "Rahul"){
    res <- (N / (N - C)) * (- sum(cmem * log(cmem)) / N)
  }
  if(method %in% c("FuzME", "McBrathney")){
    H <- -1 / N * sum(cmem * log(cmem))
    res <- H / log(C)
  }
  return(res)
}
Then use those to calculate the indices from the degrees of membership returned by the cmeans function for the iris dataset.
# prepare variables
cl <- list()
fpi <- nce <- NULL
# cycle through the desired number of clusters
for(i in 2:6){
cl[[i]] <- e1071::cmeans(iris[, -5], i, 20, method = "cmeans")
fpi <- c(fpi, FPI(cl[[i]]$membership, method = "M"))
nce <- c(nce, NCE(cl[[i]]$membership, method = "M"))
}
Last, plot with two different axes in one plot.
# add space for the second axis label
par(mar = c(5,4,1,4) + .1)
# plot FPI
plot(2:6, fpi, lty = 2, pch = 18, type = "b", xlab = "Number of clusters", ylab = "FPI")
# plot NCE, manually adding the second axis
par(new = TRUE)
plot(2:6, nce, lty = 1, pch = 15, type = "b", xlab = "", ylab = "", axes = FALSE)
axis(4, at = pretty(range(nce)))
mtext("NCE", side = 4, line = 3)
# add legend
legend("top", legend = c("FPI", "NCE"), pch = c(18,15), lty = c(2,1), horiz = TRUE)
EDIT1: Updated the functions according to optional equations from two different publications and calculated the example on the iris dataset.
EDIT2: Added code for the FPI and NCE calculations specified in the FuzME MATLAB code available here.
Hope this helps:
library(e1071) # for cmeans()
library(tidyr) # for pivot_longer()
library(dplyr)
library(ggplot2)
f <- function(cl) {
  C <- length(cl$size)
  N <- sum(cl$size)
  mu <- cl$membership
  fpi <- 1 - C / (C - 1) * (1 - sum((mu)^2) / N)
  nce <- N / (N - C) * (-sum(log(mu) * mu) / N)
  c(FPI = fpi, NCE = nce)
}
# x is the simulated data from the question
K <- 2:6
data.frame(t(rbind(
  K = K,
  sapply(
    K,
    function(k) f(cmeans(x, k, 20, verbose = TRUE, method = "cmeans"))
  )
))) %>%
  pivot_longer(cols = FPI:NCE, names_to = "Index") %>%
  ggplot(aes(x = K, y = value, group = Index)) +
  geom_line(aes(linetype = Index, color = Index)) +
  geom_point() +
  scale_y_continuous(
    name = "FPI",
    sec.axis = sec_axis(~., name = "NCE")
  ) +
  theme(legend.position = "top")

Determine transects perpendicular to a (coast)line in R

I'd like to automatically derive transects perpendicular to a coastline. I need to be able to control their length and spacing, and their orientation needs to be on the "correct" side of the line. I came up with a way to do that, but especially selecting the "correct" side (it needs to point to the ocean) could be done better. General approach:
1. For each line segment in a SpatialLinesDataFrame, define transect locations
2. Define transect: in both directions perpendicular to the coastline, create points that determine the transect
3. Create a polygon based on the coastline, add extra points to grow the polygon in a direction that is known, and use that to clip the points that are inside (considered as land, and therefore not of interest)
4. Create a transect based on the remaining point
Especially part 3 is of interest. I'd like a more robust method to determine the correct orientation of the transect. This is what I'm using now:
library(rgdal)
library(raster)
library(sf)
library(ggplot2)
library(rgeos) # create lines and spatial objects
# create testing lines
l1 <- cbind(c(1, 2, 3), c(3, 2, 2))
l2 <- cbind(c(1, 2, 3), c(1, 1.5, 1))
Sl1 <- Line(l1)
Sl2 <- Line(l2)
S1 <- Lines(list(Sl1), ID = "a")
S2 <- Lines(list(Sl2), ID = "b")
line <- SpatialLines(list(S1, S2))
plot(line)
# for testing:
sep <- 0.1
start <- 0
AllTransects <- vector('list', 100000) # DB that should contain all transects
for (i in 1: length(line)){
# i <- 2
###### Define transect locations
# Define geometry subset
subset_geometry <- data.frame(geom(line[i,]))[, c('x', 'y')]
# plot(SpatialPoints(data.frame(x = subset_geometry[,'x'], y = subset_geometry[,'y'])), axes = T, add = T)
dx <- c(0, diff(subset_geometry[,'x'])) # Calculate difference at each cell compared to the next cell
dy <- c(0, diff(subset_geometry[,'y']))
dseg <- sqrt(dx^2+dy^2) # get rid of negatives and transfer to uniform distance per segment (pythagoras)
dtotal <- cumsum(dseg) # cumulative sum total distance of segments
linelength = sum(dseg) # total linelength
pos = seq(start,linelength, by=sep) # Array with position numbers in meters
whichseg = unlist(lapply(pos, function(x){sum(dtotal<=x)})) # Segments corresponding to distance
pos=data.frame(pos=pos, # keep only
whichseg=whichseg, # Position in meters on line
x0=subset_geometry[whichseg,1], # x-coordinate on line
y0=subset_geometry[whichseg,2], # y-coordinate on line
dseg = dseg[whichseg+1], # segment length selected (sum of all dseg in that segment)
dtotal = dtotal[whichseg], # Accumulated length
x1=subset_geometry[whichseg+1,1], # Get X coordinate on line for next point
y1=subset_geometry[whichseg+1,2] # Get Y coordinate on line for next point
)
pos$further = pos$pos - pos$dtotal # which is the next position (in meters)
pos$f = pos$further/pos$dseg # fraction next segment of its distance
pos$x = pos$x0 + pos$f * (pos$x1-pos$x0) # X Position of point on line which is x meters away from x0
pos$y = pos$y0 + pos$f * (pos$y1-pos$y0) # Y Position of point on line which is x meters away from y0
pos$theta = atan2(pos$y0-pos$y1,pos$x0-pos$x1) # Angle between points on the line in radians
pos$object = i
###### Define transects
tlen <- 0.5
pos$thetaT = pos$theta+pi/2 # Get the angle
dx_poi <- tlen*cos(pos$thetaT) # coordinates of point of interest as defined by position length (sep)
dy_poi <- tlen*sin(pos$thetaT)
# table with only the POI information
# transect is defined by x0,y0 and x1,y1 with x,y the coordinate on the line
output <- data.frame(pos = pos$pos,
x0 = pos$x + dx_poi, # X coordinate away from line
y0 = pos$y + dy_poi, # Y coordinate away from line
x1 = pos$x - dx_poi, # X coordinate away from line
y1 = pos$y - dy_poi, # X coordinate away from line
theta = pos$thetaT, # angle
x = pos$x, # Line coordinate X
y = pos$y, # Line coordinate Y
object = pos$object,
nextx = pos$x1,
nexty = pos$y1)
# create polygon from object to select correct segment of the transect (coastal side only)
points_for_polygon <- rbind(output[,c('x', 'y','nextx', 'nexty')])# select points
pol_for_intersect <- SpatialPolygons( list( Polygons(list(Polygon(points_for_polygon[,1:2])),1)))
# plot(pol_for_intersect, axes = T, add = T)
# Find a way to increase the polygon - should depend on the shape&direction of the polygon
# for the purpose of cropping the transects
firstForPlot <- data.frame(x = points_for_polygon$x[1], y = points_for_polygon$y[1])
lastForPlot <- data.frame(x = points_for_polygon$x[length(points_for_polygon$x)],
y = points_for_polygon$y[length(points_for_polygon$y)])
plot_first <- SpatialPoints(firstForPlot)
plot_last <- SpatialPoints(lastForPlot)
# plot(plot_first, add = T, col = 'red')
# plot(plot_last, add = T, col = 'blue')
## Corners of shape dependent bounding box
## absolute values should be depended on the shape beginning and end point relative to each other??
LX <- min(subset_geometry$x)
UX <- max(subset_geometry$x)
LY <- min(subset_geometry$y)
UY <- max(subset_geometry$y)
# polygon(x = c(LX, UX, UX, LX), y = c(LY, LY, UY, UY), lty = 2)
# polygon(x = c(LX, UX, LX), y = c(LY, LY, UY), lty = 2)
# if corners are changed too much the plot$near becomes a problem: the new points are too far away
# Different points are selected
LL_corner <- data.frame(x = LX-0.5, y = LY - 1)
LR_corner <- data.frame(x = UX + 0.5 , y = LY - 1)
UR_corner <- data.frame(x = LX, y = UY)
corners <- rbind(LL_corner, LR_corner)
bbox_add <- SpatialPoints(rbind(LL_corner, LR_corner))
# plot(bbox_add ,col = 'green', axes = T, add = T)
# Select nearest point for drawing order to avoid weird shapes
firstForPlot$near <-apply(gDistance(bbox_add,plot_last, byid = T), 1, which.min)
lastForPlot$near <- apply(gDistance(bbox_add,plot_first, byid = T), 1, which.min)
# increase polygon with corresponding points
points_for_polygon_incr <- rbind(points_for_polygon[1:2], corners[firstForPlot$near,], corners[lastForPlot$near,])
pol_for_intersect_incr <- SpatialPolygons( list( Polygons(list(Polygon(points_for_polygon_incr)),1)))
plot(pol_for_intersect_incr, col = 'blue', axes = T)
# Coordinates of points first side
coordsx1y1 <- data.frame(x = output$x1, y = output$y1)
plotx1y1 <- SpatialPoints(coordsx1y1)
plot(plotx1y1, add = T)
coordsx0y0 <- data.frame(x = output$x0, y = output$y0)
plotx0y0 <- SpatialPoints(coordsx0y0)
plot(plotx0y0, add = T, col = 'red')
# Intersect
output[, "x1y1"] <- over(plotx1y1, pol_for_intersect_incr)
output[, "x0y0"] <- over(plotx0y0, pol_for_intersect_incr)
x1y1NA <- sum(is.na(output$x1y1)) # Count Na
x0y0NA <- sum(is.na(output$x1y1)) # Count NA
# inefficient way of selecting the correct end point
# e.g. either left or right, depending on intersect
indexx0y0 <- with(output, !is.na(output$x0y0))
output[indexx0y0, 'endx'] <- output[indexx0y0, 'x1']
output[indexx0y0, 'endy'] <- output[indexx0y0, 'y1']
index <- with(output, is.na(output$x0y0))
output[index, 'endx'] <- output[index, 'x0']
output[index, 'endy'] <- output[index, 'y0']
AllTransects = rbind(AllTransects, output)
}
# Create the transects
lines <- vector('list', nrow(AllTransects))
for(n in 1: nrow(AllTransects)){
# n = 30
begin_coords <- data.frame(lon = AllTransects$x, lat = AllTransects$y) # Coordinates on the original line
end_coords <- data.frame(lon = AllTransects$endx, lat = AllTransects$endy) # coordinates as determined by the over: remove implement in row below by selecting correct column from output
col_names <- list('lon', 'lat')
row_names <- list('begin', 'end')
# dimnames < list(row_names, col_names)
x <- as.matrix(rbind(begin_coords[n,], end_coords[n,]))
dimnames(x) <- list(row_names, col_names)
lines[[n]] <- Lines(list(Line(x)), ID = as.character(n))
}
lines_sf <- SpatialLines(lines)
# plot(lines_sf)
df <- SpatialLinesDataFrame(lines_sf, data.frame(AllTransects))
plot(df, axes = T)
As long as I'm able to correctly define the bounding box and grow the polygon correctly, this works. But I'd like to try this on multiple coastlines and parts of coastlines, each with its own orientation. In the example below, the polygon is grown for the bottom coastline segment; as a result, the top one has transects in the wrong direction.
Does anybody have an idea in what direction to look? I was considering using external data, but when possible I'd like to avoid that.
I used your code for my question (measure line inside a polygon), but maybe this works for you:
1. Take a spatial polygon or line
2. Extract the coordinates of the element
3. Make combinations of coordinates to create straight lines, from which you can derive perpendicular lines (e.g. ((x1,x3),(y1,y3)) or ((x2,x4),(y2,y4)))
4. Iterate along all the pairs of coordinates
5. Apply the code you did, especially the result of the 'output' table
I did this for a polygon, so I could generate perpendicular lines based on the straight line I create taking an arbitrary (1, 3) set of coordinates.
#Define a polygon
pol <- rip[1, 1] # I took the first polygon from my Shapefile
polcoords <- pol@polygons[[1]]@Polygons[[1]]@coords
# define how to create your coords pairing. My case: 1st with 3rd, 2nd with 4th, ...
pairs <- data.frame(a = 1:( nrow(polcoords) - 1),
b = c(2:(nrow(polcoords)-1)+1, 1) )
# Empty list to store the lines
lnDfls <- list()
for (j in 1:nrow(pairs)){ # j = 1
# Select the pairs
pp <- polcoords[c(pairs$a[j], pairs$b[j]), ]
#Extract mean coord, from where the perp. line will start
midpt <- apply(pp, 2, mean)
# points(pp, col = 3, pch = 20 )
# points(midpt[1], midpt[2], col = 4, pch = 20)
x <- midpt[1]
y <- midpt[2]
theta = atan2(y = pp[2, 2] - pp[1, 2], pp[2, 1] - pp[1, 1]) # Angle between points on the line in radians
# pos$theta = atan2(y = pos$y0-pos$y1 , pos$x0-pos$x1) # Angle between points on the line in radians
###### Define transects
tlen <- 1000 # distance in m
thetaT = theta+pi/2 # Get the angle
dx_poi <- tlen*cos(thetaT) # coordinates of point of interest as defined by position length (sep)
dy_poi <- tlen*sin(thetaT)
# table with only the POI information
# transect is defined by x0,y0 and x1,y1 with x,y the coordinate on the line
output2 <- data.frame(#pos = pos,
x0 = x + dx_poi, # X coordinate away from line
y0 = y + dy_poi, # Y coordinate away from line
x1 = x - dx_poi, # X coordinate away from line
y1 = y - dy_poi # X coordinate away from line
#theta = thetaT, # angle
#x = x, # Line coordinate X
#y = y # Line coordinate Y
)
# points(output2$x1, output2$y1, col = 2)
#segments(x, y, output2$x1[1], output2$y1[1], col = 2)
mat <- as.matrix(cbind( c( x, output2$x1[1] ) , c( y, output2$y1[1] ) ))
LL <- Lines(list(Line( mat )), ID = as.character(j))
# plot(SpatialLinesDataFrame(LL, data.frame (a = 1)), add = TRUE, col = 2)
# plot(SpatialLines(list(LL)), add = TRUE, col = 2)
#lnList[[j]] <- LL
lnDfls[[j]] <- SpatialLinesDataFrame( SpatialLines(LinesList = list(LL)) ,
match.ID = FALSE,
data.frame(id = as.character(j ) ) )
# line = st_sfc(st_linestring(mat))
# st_length(line)
# ln <- (SpatialLines(LinesList = list(LL)))
# lndf <- SpatialLinesDataFrame( lndf , data.frame(id = j ))
# sf::st_length(ln)
# # plot(lines_sf)
}
compDf <- do.call(what = sp::rbind.SpatialLines, args = lnDfls)
plot(pol)
plot(compDf, add = TRUE, col = 2)
plot(inDfLn, add = TRUE, col = 3)
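On the original orientation problem (choosing the seaward side), one possible sketch, assuming a land polygon covering the coast is available as an sf object called land_poly (a hypothetical name), is to keep whichever transect endpoint does not fall on land:
library(sf)
# for one transect with endpoints (x0, y0) and (x1, y1):
# test which endpoint lies inside the (hypothetical) land polygon and keep the other one
seaward_end <- function(x0, y0, x1, y1, land_poly) {
  p0 <- st_sfc(st_point(c(x0, y0)), crs = st_crs(land_poly))
  p1 <- st_sfc(st_point(c(x1, y1)), crs = st_crs(land_poly))
  p0_on_land <- lengths(st_intersects(p0, land_poly)) > 0
  if (p0_on_land) c(x = x1, y = y1) else c(x = x0, y = y0)
}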

R Programming other alternatives for plot

I wonder how you can simplify these two:
plot (payroll,wins)
id = identify(payroll, wins,labels = code, n = 5)
plot (payroll,wins)
with(data, text(payroll, wins, labels = code, pos = 1, cex=0.5))
using other alternatives, such as pch() and as.numeric()?
Not sure it's easier, but you can change pch during identification as below (taken from R-help). Every time you click, an empty point changes to a filled-in dot.
# data simulation
data <- data.frame(payroll = rnorm(10), wins = rnorm(10), code = letters[1:10])
identifyPch <- function(x, y = NULL, n = length(x), plot = FALSE, pch = 19, ...)
{
xy <- xy.coords(x, y)
x <- xy$x
y <- xy$y
sel <- rep(FALSE, length(x))
while (sum(sel) < n) {
ans <- identify(x[!sel], y[!sel], labels = which(!sel), n = 1, plot = plot, ...)
if(!length(ans)) {
break
}
ans <- which(!sel)[ans]
points(x[ans], y[ans], pch = pch)
sel[ans] <- TRUE
}
## return indices of selected points
which(sel)
}
if(dev.interactive()) { ## use it
with(data, plot(payroll,wins))
id = with(data, identifyPch(payroll, wins))
}
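For the pch()/as.numeric() idea mentioned in the question, a minimal sketch (assuming code is a character or factor column of data, as in the simulated data above) that encodes each code as its own plotting symbol instead of labelling points:
# one plotting symbol per code, no interactive identification needed
codes <- factor(data$code)
plot(data$payroll, data$wins, pch = as.numeric(codes), cex = 0.8)
legend("topleft", legend = levels(codes), pch = seq_along(levels(codes)), cex = 0.5)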

R Corrgram showing frequency pairs that have zero abundance 'Pie Method'

I am attempting to reproduce a corrgram (below; Figure 1) using the reproducible R code of Zuur et al. (2010) (below), showing the frequency with which pairs of water-bird species both have zero abundance. The colour and the amount that a circle has been filled correspond to the proportion of observations with double zeros. The diagonal running from bottom left to top right represents the percentage of observations of a variable equal to zero.
I have adapted this code for my data, but I am experiencing the same problem after running the code for both datasets. When I run the code, the circles inside the corrgram are not filling in and remain empty (below; Figure 2).
I am, however, confused as to why I am hitting this problem. If anyone has a solution as to why this occurs, I would be deeply appreciative of your help.
Data: By Zuur et al (2010)
The data is too large to include with this post but it can be found in the supporting materials section called ElphickBirdData.txt
R Code: Zuur et al (2010)
RiceField <- read.table(file="ElphickBirdData.txt", header = TRUE)
AllS <- c(
"TUSW", "GWFG", "WHGO", "CAGO", "MALL",
"GADW", "GWTE", "CITE", "UNTE", "AMWI", "NOPI",
"NOSH", "RIDU", "CANV", "BUFF", "WODU", "RUDU",
"EUWI", "UNDU", "PBGB", "SORA", "COOT", "COMO",
"AMBI", "BCNH", "GBHE", "SNEG", "GREG", "WFIB",
"SACR", "AMAV", "BNST", "BBPL", "KILL", "LBCU",
"GRYE", "LEYE", "LBDO", "SNIP", "DUNL", "WESA",
"LESA", "PEEP", "RUFF", "UNSH", "RBGU", "HEGU",
"CAGU", "GUSP")
#Determine species richness
Richness <- colSums(RiceField[,AllS] > 0, na.rm = TRUE)
#Remove all covariates
Birds <- RiceField[,AllS]
#To reduce the number of variables in the figure, we only used the
#20 species that occurred at more than 40 sites.
#As a result, N = 20. Else it becomes a mess.
Birds2 <- Birds[, Richness > 40]
N <- ncol(Birds2)
AllNames <- names(Birds2)
A <- matrix(nrow = N, ncol = N)
for (i in 1:N){
for (j in 1:N){
A[i,j] <- sum(RiceField[,AllS[i]]==0 & RiceField[,AllS[j]]==0, na.rm=TRUE)
}}
A1 <- A/2035
print(A1, digits = 2)
rownames(A1) <- AllNames
colnames(A1) <- AllNames
library(lattice)
library(RColorBrewer)
panel.corrgram.2 <- function(x, y, z, subscripts, at = pretty(z), scale = 0.8, ...)
{
require("grid", quietly = TRUE)
x <- as.numeric(x)[subscripts]
y <- as.numeric(y)[subscripts]
z <- as.numeric(z)[subscripts]
zcol <- level.colors(z, at = at, ...)
for (i in seq(along = z))
{
lims <- range(0, z[i])
tval <- 2 * base::pi *
seq(from = lims[1], to = lims[2], by = 0.01)
grid.polygon(x = x[i] + .5 * scale * c(0, sin(tval)),
y = y[i] + .5 * scale * c(0, cos(tval)),
default.units = "native",
gp = gpar(fill = zcol[i]))
grid.circle(x = x[i], y = y[i], r = .5 * scale,
default.units = "native")
}
}
levelplot(A1,xlab=NULL,ylab=NULL,
at=do.breaks(c(0.5,1.01),101),
panel=panel.corrgram.2,
scales=list(x=list(rot=90)),
colorkey=list(space="top"),
col.regions=colorRampPalette(c("red","white","blue")))
#Grey colours
levelplot(A1.bats,xlab=NULL,ylab=NULL,
at=do.breaks(c(0.5,1.01),101),
panel=panel.corrgram.2,
scales=list(x=list(rot=90)),
colorkey=list(space="top"),
col.regions=colorRampPalette(c(grey(0.8),grey(0.5),grey(0.2))))
Figure 1.
Figure 2
The cause of your problem is that grid.circle() paints over grid.polygon() with white. You can solve it by changing the order of grid.circle() and grid.polygon() (or by adding gp = gpar(fill = NA) to grid.circle()).
panel.corrgram.2.2 <- function(x, y, z, subscripts, at = pretty(z), scale = 0.8, ...)
{
require("grid", quietly = TRUE)
x <- as.numeric(x)[subscripts]
y <- as.numeric(y)[subscripts]
z <- as.numeric(z)[subscripts]
zcol <- level.colors(z, at = at, ...)
for (i in seq(along = z))
{
lims <- range(0, z[i])
tval <- 2 * base::pi *
seq(from = lims[1], to = lims[2], by = 0.01)
grid.circle(x = x[i], y = y[i], r = .5 * scale, # change the order
default.units = "native")
grid.polygon(x = x[i] + .5 * scale * c(0, sin(tval)),
y = y[i] + .5 * scale * c(0, cos(tval)),
default.units = "native",
gp = gpar(fill = zcol[i]))
}
}
levelplot(A1,xlab=NULL,ylab=NULL,
at=do.breaks(c(0.5,1.01),101),
panel=panel.corrgram.2.2,
scales=list(x=list(rot=90)),
colorkey=list(space="top"),
col.regions=colorRampPalette(c("red","white","blue")))
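For completeness, a sketch of the other fix mentioned above: keep the original drawing order (polygon first, circle on top) but pass gp = gpar(fill = NA) to grid.circle() so the circle only draws its outline (panel.corrgram.2.3 is a name introduced here):
panel.corrgram.2.3 <- function(x, y, z, subscripts, at = pretty(z), scale = 0.8, ...)
{
  require("grid", quietly = TRUE)
  x <- as.numeric(x)[subscripts]
  y <- as.numeric(y)[subscripts]
  z <- as.numeric(z)[subscripts]
  zcol <- level.colors(z, at = at, ...)
  for (i in seq(along = z))
  {
    lims <- range(0, z[i])
    tval <- 2 * base::pi *
      seq(from = lims[1], to = lims[2], by = 0.01)
    grid.polygon(x = x[i] + .5 * scale * c(0, sin(tval)),
                 y = y[i] + .5 * scale * c(0, cos(tval)),
                 default.units = "native",
                 gp = gpar(fill = zcol[i]))
    grid.circle(x = x[i], y = y[i], r = .5 * scale,
                default.units = "native",
                gp = gpar(fill = NA)) # transparent fill: outline only
  }
}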
