I need to create a function that will:
1. search for the pixel in a raster containing the smallest value;
2. in the first iteration, assign all the pixels within a radius equal to the raster cell size (2.5 km) a value of 1 (including the pixel with the smallest value);
3. in the second iteration, select the pixel with the next smallest value (excluding pixels selected in step 2), search the same radius, and assign these pixels a value of 2. This continues until there are no more pixels left (if there are no free pixels within the radius, the selection stops).
Sounds complex but hopefully possible? Here is an example of my raster:
library(raster)
xy <- matrix(rnorm(900, 40, 200), 30, 30)
image(xy)
rast <- raster(xy)
# Give it lat/lon coords for 36-37°E, 3-2°S
extent(rast) <- c(36,37,-3,-2)
Perhaps you can use the approach below. I would not try this on very large rasters (that would take forever), but for your example it works fine, as long as you do not have to do this too many times.
library(raster)
set.seed(0)
xy <- matrix(rnorm(900, 40, 200), 30, 30)
r <- raster(xy)
extent(r) <- c(36,37,-3,-2)
rorig <- r
x <- r
i <- 1
while (TRUE) {
# cell with min value
m <- which.min(x)
## are there ties, and do you care about them? Should tied cells get the same level?
## If not, you can do
## m[1]
## or sample
## m <- sample(m, 1)
# focal and four adjacent cells
a <- adjacent(r, m, directions=4, pairs=FALSE, include=TRUE)
# exclude those that have already been affected
w <- which(!is.na(x[a]))
a <- a[w]
# assign the value
r[a] <- i
# set assigned cells to NA
x[a] <- NA
# stop when done
if (is.na(maxValue(x))) break
i <- i + 1
}
plot(r)
plot(rorig, r) # original values against assigned levels
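Not part of the original answer, but a possibly useful extension: if the search radius should eventually span more than the four rook neighbours, raster::adjacent also accepts a neighbourhood matrix as its directions argument, where the single 0 marks the focal cell and the 1s mark the cells to select. A hedged sketch for a radius of roughly two cells:
ngb <- matrix(c(0,0,1,0,0,
                0,1,1,1,0,
                1,1,0,1,1,
                0,1,1,1,0,
                0,0,1,0,0), nrow=5, byrow=TRUE)
a <- adjacent(r, m, directions=ngb, pairs=FALSE, include=TRUE)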
I'm trying to calculate a measure akin to the moment of inertia using a raster layer, and I am struggling to figure out how to get the distance of each cell to its patch's centroid and then extract both that distance and the cell's value.
I want to calculate the moment of inertia (get the squared distance of each cell to its patch's centroid, multiply by the value of the cell, sum these values by patch, and then divide by the sum of all values per patch). I provide a simplified set-up below. The code creates a simple raster layer, patches clusters of cells, and gets their centroids. I know that the function to use next is probably terra::distance (maybe in combination with terra::zonal?!). How do I calculate the distance by patch?
#lonlat
library(terra)
r <- rast(ncols=36, nrows=18, crs="+proj=longlat +datum=WGS84")
r[498:500] <- 1
r[3:6] <- 1
r[111:116] <- 8
r[388:342] <- 1 # decreasing index sequence: assigns cells 342 through 388
r[345:349] <- 3
r_patched <- patches(r, directions=8, allowGaps=FALSE)
testvector <- terra::as.polygons(r_patched, trunc=TRUE, dissolve=TRUE)
p_centr <- geom(centroids(testvector), df=T)
##next steps
#1. get distance of each cell from patch's centroid
#r <- distance(r)
#2. multiply cell value by squared distance to centroid
I think you need to loop over the patches. Something like this:
p_centr <- centroids(testvector)
v <- rep(NA, length(p_centr))
for (i in 1:length(p_centr)) {
    # isolate patch i and trim the raster to its extent
    x <- ifel(r_patched == p_centr$patches[i], i, NA)
    x <- trim(x)
    # distance from each cell to the patch centroid, masked to the patch
    d <- distance(x, p_centr[i,])
    d <- mask(d, x)
    # square the distance and multiply by the cell values
    d <- d^2 * crop(r, d)
    v[i] <- global(d, "sum", na.rm=TRUE)[[1]]
}
v / sum(v)
#[1] 1.213209e-05 1.324495e-02 9.864759e-01 2.669833e-04
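One hedged note: the question describes dividing each patch's sum by the sum of that patch's own cell values, whereas v / sum(v) above normalizes by the grand total across all patches. If the per-patch denominator is what you want, a sketch along the same lines (variable names illustrative):
w <- rep(NA, length(p_centr))
for (i in 1:length(p_centr)) {
    x <- ifel(r_patched == p_centr$patches[i], i, NA)
    x <- trim(x)
    d <- mask(distance(x, p_centr[i,]), x)
    vals_i <- crop(r, d)                                     # cell values around patch i
    v[i] <- global(d^2 * vals_i, "sum", na.rm=TRUE)[[1]]     # sum of value * squared distance
    w[i] <- global(mask(vals_i, x), "sum", na.rm=TRUE)[[1]]  # sum of values in patch i
}
v / w # moment of inertia per patch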
I am attempting to create a disc for each point in a pattern; each disc will have the same radius. Then, for each disc, I want to count the number of points falling within it. Each pattern has 100-400 points. I have written code to do this, but it is quite slow; the code is below. I cannot provide the shapefile and points as that would be very difficult, but I could create some dummy data if need be.
library(spatstat)
library(sp)
library(rgeos)
library(tictoc)
W <- as.owin(shape)
#Converts created .shp file into a "window"
#in which everything is plotted and calculated
SPDF <- SpatialPointsDataFrame(P[,1:2], P)
#Converts data frame to spatial points data frame
SP <- as(SPDF, "SpatialPoints") #Converts SPDF to spatial points
SP1 <- as.ppp(coordinates(SP), W)
SP2 <- as.ppp(SP1)
attr(SP1, "rejects")
attr(SP2, "rejects")
aw <- area.owin(W) #Area, in pixels squared, of leaf window created earlier
awm <- aw * (meas)^2 * 100 #Area of window in square millimeters ('meas' is the pixel size, defined elsewhere)
# Trichome_Density_Count-----------------------------------------------------------------------------------------------
TC <- nrow(P) #Counts number of rows in XY data points file,
#this is the number of trichomes from ImageJ
TD <- TC/awm #Trichome density, trichomes per mm^2
#SPDF2 <- as.SpatialPoints.ppp(SP2)
#kg <- knn.graph(SPDF2, k = 1)
#Creates the lines connecting each NND pairwise connection
#dfkg <- data.frame(kg) #Converts lines into a data frame
#dfkgl <- dfkg$length
meanlength <- 78
discstest <- discs(SP2, radii = meanlength,
separate = TRUE, mask = FALSE, trim = FALSE,
delta = NULL, npoly=NULL)
#Function creates discs for each trichome
#Using nearest neighbor lengths as radii
#NEED TO ADD CLIPPING
ratiolist <- c()
for (i in 1:length(discstest)) {
ow2sp <- owin2SP(discstest[[i]])
leafsp <- owin2SP(W)
tic("gIntersection")
intersect <- rgeos::gIntersection(ow2sp, leafsp)
Sys.sleep(1)
toc()
tic("over")
res <- as.data.frame(sp::over(SP, intersect, returnList = FALSE))
Sys.sleep(1)
toc()
res[is.na(res)] <- 0
newowin <- as.owin(intersect)
circarea <- area.owin(newowin)
trichactual <- sum(res)
trichexpect <- (TC / aw) * circarea
ratio <- trichactual / trichexpect
ratiolist[[i]] <- ratio
}
If I understand you correctly, you want to loop through each point and check how many points fall within a disc of radius r centred at that point. This is done very efficiently in spatstat with the function closepaircounts:
closepaircounts(SP2, r = meanlength)
This simply returns a vector with the number of points contained in the disc of radius r for each point in SP2.
I have just tried this for 100,000 points, where each point on average had almost 3,000 other points in the disc around it, and it took 8 seconds on my laptop. If you have many more points, or in particular if the disc radius is so big that each disc contains many more points, the calculation may become slow.
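A minimal reproducible sketch with simulated data (the window, point count, and radius are arbitrary stand-ins for your leaf window and meanlength):
library(spatstat)
set.seed(1)
X <- runifpoint(300, win=owin(c(0,1), c(0,1))) # dummy pattern standing in for SP2
counts <- closepaircounts(X, r=0.1) # number of points within distance 0.1 of each point
head(counts)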
I have a stack of 4 rasters. I would like the average correlation through time between a pixel and each of its 8 neighbors.
some data:
library(raster)
r1=raster(matrix(runif(25),nrow=5))
r2=raster(matrix(runif(25),nrow=5))
r3=raster(matrix(runif(25),nrow=5))
r4=raster(matrix(runif(25),nrow=5))
s=stack(r1,r2,r3,r4)
so for a pixel at position x, which has 8 neighbors at the NE, E, SE, S etc positions, I want the average of
cor(x,NE)
cor(x,E)
cor(x,SE)
cor(x,S)
cor(x,SW)
cor(x,W)
cor(x,NW)
cor(x,N)
and the average value saved at position x in the resulting raster. The edge cells could be NA, or, if possible, there would be a flag to calculate the average correlation using just the cells each edge cell touches (either 3 or 5 cells).
Thanks!
I don't believe @Pascal's suggestion of using focal() would work, because focal() takes a single raster layer as an argument, not a stack. The solution below is the easiest to understand. It could be made more efficient by minimizing the number of times you extract values for each focal cell (see the sketch after the code):
library(raster)
set.seed(2002)
r1 <- raster(matrix(runif(25),nrow=5))
r2 <- raster(matrix(runif(25),nrow=5))
r3 <- raster(matrix(runif(25),nrow=5))
r4 <- raster(matrix(runif(25),nrow=5))
s <- stack(r1,r2,r3,r4)
## Calculate adjacent raster cells for each focal cell:
a <- adjacent(s, 1:ncell(s), directions=8, sorted=TRUE)
## Create column to store correlations:
out <- data.frame(a)
out$cors <- NA
## Loop over all focal cells and their adjacencies,
## extract the values across all layers and calculate
## the correlation, storing it in the appropriate row of
## our output data.frame:
for (i in 1:nrow(a)) {
out$cors[i] <- cor(c(s[a[i,1]]), c(s[a[i,2]]))
}
## Take the mean of the correlations by focal cell ID:
r_out_vals <- aggregate(out$cors, by=list(out$from), FUN=mean)
## Create a new raster object to store our mean correlations in
## the focal cell locations:
r_out <- s[[1]]
r_out[] <- r_out_vals$x
plot(r_out)
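As a hedged sketch of the efficiency note above (not part of the original answer): extract all values from the stack once into a matrix and index that matrix, instead of touching the raster on every iteration.
m <- getValues(s) # ncell x nlayers matrix of values
cors <- sapply(1:nrow(a), function(i) cor(m[a[i,1], ], m[a[i,2], ]))
r_out2 <- s[[1]]
r_out2[] <- as.vector(tapply(cors, a[,1], mean)) # mean correlation per focal cell
Note also that adjacent() simply returns fewer neighbours for edge and corner cells, so averaging by focal cell already uses just the 3 or 5 cells they touch, which is the edge behaviour asked about.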
When extracting values of a raster along a SpatialLine in R, how do you relate these values to the actual distance along the line?
Suppose I want to extract the value of the R logo along the following line:
library(raster)
r <- raster(system.file("external/rlogo.grd", package="raster"))
x=c(5, 95)
y=c(20, 50)
line = SpatialLines(list(Lines(Line(cbind(x,y)), ID="a")))
plot(r)
plot(line, add=TRUE)
I can extract the values and plot them, but how do I replace the x values (1:length(vals) below) with the actual distance along the line (starting e.g. at 0 from the left end of the line)?
vals <- extract(r, line)[[1]]
plot(1:length(vals), vals, type='o')
I could combine the extraction of the cells with xyFromCell to get the coordinates of the extracted cells as suggested here, but it is not clear to me how to go further.
I'm not sure exactly what you're asking, but if you are looking for distances between the leftmost coordinate of the line segment and the centres of the cells the line passes through, then you can find them like this:
x <- extract(r, line, cellnumbers=TRUE)[[1]]
xy <- xyFromCell(r, x[,1]) # coordinates of the cells the line passes through
start <- xy[which.min(xy[,1]),] # leftmost cell coordinate on the line
d <- apply(xy, 1, function(x, start) sqrt(sum((x-start)^2)), start=start) # distances between the line start and the cell centres
plot(1:length(d), d, type='o')
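To go one step further and plot the extracted values against these distances (a small addition, not from the original answer), sort by distance first so the profile reads from the start of the line:
o <- order(d)
plot(d[o], x[o,2], type="o", xlab="distance from line start", ylab="value")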
Here is a solution (partly building on @jvj's input) that computes the orthogonal projections of the cell centres returned by raster::extract onto the line, and then computes the distances along the line.
(This is an R beginner's script and likely easy to improve, but it seems to work. Note that it is only valid for rasters whose projection preserves distances.)
vals <- extract(r, line, cellnumbers=TRUE)[[1]]
cellsxy <- xyFromCell(r, vals[,1]) # coordinates of intersected cells (likely not ON the line)
linexy = spsample(line, 1000, "regular") # get the line as points
linexy <- matrix(cbind(linexy$x, linexy$y), ncol=2) # easier than Spatial object for later
orthoproj <- c() # to store the orthogonal projections of cells centres on the line
for (i in 1:nrow(cellsxy)) {
xypt = cellsxy[i,]
min.index <- which.min(spDistsN1(linexy, xypt))
orthopt <- linexy[min.index, ] # nearest sampled point on the line, i.e. (approximately) the orthogonal projection
orthoproj <- c(orthoproj, c(orthopt[1], orthopt[2]))
}
orthoproj <- matrix(orthoproj, ncol=2, byrow=TRUE)
orthoproj <- data.frame(x=orthoproj[,1], y=orthoproj[,2])
orthoproj <- orthoproj[order(orthoproj[,1]),] # reorder by increasing x, i.e. along the line
orthoproj <- data.frame(x=orthoproj$x, y=orthoproj$y)
start <- linexy[which.min(linexy[,1]),] # leftmost coordinate of the line
dists <- apply(orthoproj, 1,
function(xy, start) sqrt(sum((xy-start)^2)),
start=start) # distances between 'start' and the orthogonal projections
plot(dists, rev(vals[,2]), type='o') # !! beware: whether 'vals' must be reversed
# depends on the order in which raster::extract returns cell numbers
# and on the shape of your line !!
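For a perfectly straight line like this one, a simpler alternative sketch (again assuming a distance-preserving projection, and reusing cellsxy and vals from above): project each cell centre onto the line's unit direction vector, which gives the distance along the line directly, without sampling the line into points.
p0 <- c(5, 20) # line start point
u <- c(95, 50) - p0 # direction vector of the line
u <- u / sqrt(sum(u^2)) # normalise to unit length
dists2 <- as.vector(sweep(cellsxy, 2, p0) %*% u) # scalar projection = distance along line
o <- order(dists2)
plot(dists2[o], vals[o,2], type="o")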
I have two lists of X,Y point coordinates.
List 1 contains more points than list 2.
The task is to pair the points so that the overall Euclidean distance is minimized.
I have working code, but I don't know whether this is the best way, and I would like hints on what I can improve, either in the result (a better algorithm for finding the minimum) or in speed, because the lists have about 2000 elements each.
The round() in the sample vectors is there so that some pairs have identical distances.
The rdist() call generates all pairwise distances in "distances". Then the minimum of the matrix is used to link two points ("dist_min"). All distances involving these two points are replaced by NA, and the loop continues by searching for the next minimum until every point of list 2 has a point from list 1.
At the end I have added a plot for visualization.
library(fields)
set.seed(1)
x1y1.data <- matrix(round(runif(200*2),2), ncol = 2) # generate 1st set of points
x2y2.data <- matrix(round(runif(100*2),2), ncol = 2) # generate 2nd set of points
distances <- rdist(x1y1.data, x2y2.data)
dist_min <- matrix(data=NA,nrow=ncol(distances),ncol=7) # prepare resulting vector with 7 columns
for(i in 1:ncol(distances))
{
inds <- which(distances == min(distances,na.rm = TRUE), arr.ind=TRUE)
dist_min[i,1] <- inds[1,1] # row of point(use 1st element of inds if points have same distance)
dist_min[i,2] <- inds[1,2] # column of point (use 1st element of inds if points have same distance)
dist_min[i,3] <- distances[inds[1,1],inds[1,2]] # distance of point
dist_min[i,4] <- x1y1.data[inds[1,1],1] # X1 coordinate of 1st point
dist_min[i,5] <- x1y1.data[inds[1,1],2] # Y1 coordinate of 1st point
dist_min[i,6] <- x2y2.data[inds[1,2],1] # X2 coordinate of 2nd point
dist_min[i,7] <- x2y2.data[inds[1,2],2] # Y2 coordinate of 2nd point
distances[inds[1,1],] <- NA # remove row (fill with NA), where minimum was found
distances[,inds[1,2]] <- NA # remove column (fill with NA), where minimum was found
}
# plot 1st set of points
# print mean distance as measure for optimization
plot(x1y1.data,col="blue",main="mean of min_distances",sub=mean(dist_min[,3],na.rm=TRUE))
points(x2y2.data,col="red") # plot 2nd set of points
segments(dist_min[,4],dist_min[,5],dist_min[,6],dist_min[,7]) # connect pairwise according found minimal distance
This is a fundamental problem in combinatorial optimization known as the assignment problem. One approach to solving it is the Hungarian algorithm, which is implemented in the R package clue:
library(clue)
# recompute the distance matrix, since the greedy loop above overwrote it with NAs
distances <- rdist(x1y1.data, x2y2.data)
# solve_LSAP() requires at least as many columns as rows, hence the transpose;
# sol[i] is the index of the x1y1.data point matched to point i of x2y2.data
sol <- solve_LSAP(t(distances))
We can verify that it outperforms the naive solution:
mean(dist_min[,3])
# [1] 0.05696033
mean(sqrt(
(x2y2.data[,1] - x1y1.data[sol, 1])^2 +
(x2y2.data[,2] - x1y1.data[sol, 2])^2))
#[1] 0.05194625
And we can construct a similar plot to the one in your question:
plot(x1y1.data,col="blue")
points(x2y2.data,col="red")
segments(x2y2.data[,1], x2y2.data[,2], x1y1.data[sol, 1], x1y1.data[sol, 2])
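A note on the design choice: the Hungarian algorithm runs in O(n^3) time, which is entirely feasible for lists of about 2000 points, and unlike the greedy nearest-pair approach it is guaranteed to return the globally optimal assignment.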