R point in polygon speed slow

The function gIntersects() from the rgeos package, which I use to test whether points are located in polygons, is pretty slow. Is there a good way to speed up the computation?

I tried a speed comparison between sp::point.in.polygon, sp::over and rgeos::gIntersects(), which were already mentioned in the comments. Note that there is also a point.in.poly function in {spatialEco}, but it seems to be just a wrapper around sp::over.
I realized that sp::point.in.polygon doesn't handle multi-part polygons well (as also pointed out here) and needs to be given raw coordinates, so I presume that for multi-part polygons it has to be used in a loop (see the sketch after the function definitions below). Note that sp::point.in.polygon was faster than all the others only in the case of a square polygon, which makes me think it is faster only for simpler shapes. All in all, whenever you hit speed issues, just test for your specific case. For my particular choice of examples, sp::over seems the better choice overall, but I would not generalize. I hope my examples are OK; otherwise feel free to correct me.
Since there is no data provided, I used some examples below.
Testing with world map data
Prepare data & functions
library(rgeos)
library(sp)
library(microbenchmark)
library(ggplot2)
library(maps)
library(maptools)
library(raster)
# Get world map data
# (conversion code from "Applied Spatial Data Analysis with R")
worldmap <- maps::map("world", fill=TRUE, plot=FALSE)
# transform to SpatialPolygons
worldmapPolys <- maptools::map2SpatialPolygons(worldmap,
                                               IDs=sapply(strsplit(worldmap$names, ":"), "[", 1L),
                                               proj4string=CRS("+proj=longlat +datum=WGS84"))
# Generate random points for entire world
set.seed(2017)
pts <- sp::spsample(worldmapPolys, n=10^5, type="random")
# Define functions to test for speed
gIntersects_tst <- function(my.pts, my.poly){
  rgeos::gIntersects(spgeom1 = my.pts,
                     spgeom2 = my.poly,
                     byid = TRUE)
}

over_tst <- function(my.pts, my.poly){
  sp::over(x = my.pts, y = my.poly)
}

point.in.polygon_tst <- function(my.pts, my.poly){
  # get raw vertex coordinates from the polygon
  XY <- raster::geom(my.poly)
  sp::point.in.polygon(point.x = my.pts@coords[,1],
                       point.y = my.pts@coords[,2],
                       pol.x = XY[,5],
                       pol.y = XY[,6],
                       mode.checked = TRUE)
}
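As an aside on the multi-part limitation mentioned above, here is a minimal, un-benchmarked sketch (my own, and it ignores hole rings) of looping sp::point.in.polygon over the parts returned by raster::geom(); the "cump" column indexes rings uniquely across objects:
point.in.polygon_multi <- function(my.pts, my.poly){
  XY <- raster::geom(my.poly)             # one row per vertex, with ring index "cump"
  inside <- rep(FALSE, length(my.pts))
  for (p in unique(XY[, "cump"])){
    ring <- XY[XY[, "cump"] == p, , drop = FALSE]
    codes <- sp::point.in.polygon(point.x = my.pts@coords[,1],
                                  point.y = my.pts@coords[,2],
                                  pol.x = ring[, "x"],
                                  pol.y = ring[, "y"])
    inside <- inside | codes > 0          # union across parts; holes are not subtracted
  }
  inside
}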
Testing for single-part polygon
# Micro-benchmarking
# The idea is to test which points fall into a selected polygon (country)
res <- microbenchmark(TF1 <- gIntersects_tst(pts, worldmapPolys[183,]),
                      TF2 <- gIntersects_tst(worldmapPolys[183,], pts),
                      idx <- over_tst(pts, worldmapPolys[183,]),
                      codes <- point.in.polygon_tst(pts, worldmapPolys[183,]))
print(res)
## Unit: milliseconds
##                                                      expr       min        lq      mean    median        uq      max neval cld
##        TF1 <- gIntersects_tst(pts, worldmapPolys[183, ]) 142.61992 153.46915 174.42346 162.90885 177.69223 338.2691   100   b
##        TF2 <- gIntersects_tst(worldmapPolys[183, ], pts) 125.99551 136.13762 158.88218 144.89180 156.91664 352.3276   100   b
##               idx <- over_tst(pts, worldmapPolys[183, ])  50.72425  55.50899  69.67542  63.80366  78.12026 132.8704   100   a
##  codes <- point.in.polygon_tst(pts, worldmapPolys[183, ]) 224.57961 243.12288 276.71458 257.38068 275.46144 589.9082   100   c
ggplot2::autoplot(res) + ggtitle("single-polygon: 100 evaluations")
Note that for gIntersects() the order of the arguments seems to matter: there are differences both in speed and in the structure of the results.
identical(TF1,TF2)
## [1] FALSE
identical(TF1[,1:length(pts)], TF2[1:length(pts),])
## [1] TRUE
class(TF1); str(TF1)
## [1] "matrix"
## logi [1, 1:100000] FALSE FALSE FALSE FALSE FALSE FALSE ...
## - attr(*, "dimnames")=List of 2
## ..$ : chr "Romania"
## ..$ : chr [1:100000] "1" "2" "3" "4" ...
class(TF2); str(TF2)
## [1] "matrix"
## logi [1:100000, 1] FALSE FALSE FALSE FALSE FALSE FALSE ...
## - attr(*, "dimnames")=List of 2
## ..$ : chr [1:100000] "1" "2" "3" "4" ...
## ..$ : chr "Romania"
# Subset world points
pts.gI1 <- pts[TF1,]
pts.gI2 <- pts[TF2,]
pts.ovr <- pts[!is.na(idx),]
pts.PiP <- pts[as.logical(codes),]
# All subsets are identical
identical(pts.gI1, pts.gI2)
## [1] TRUE
identical(pts.gI2, pts.ovr)
## [1] TRUE
identical(pts.ovr, pts.PiP)
## [1] TRUE
Simpler shapes - test with two square polygons
# Generate two square polygons
grd <- sp::GridTopology(c(1,1), c(1,1), c(2,1))
polys <- sp::as.SpatialPolygons.GridTopology(grd)
# Generate some random points
set.seed(2017)
pts2 <- sp::spsample(polys, n=10^5, type="random")
# Micro-benchmarking
# Test only for those points falling in first square
res <- microbenchmark(TF1 <- gIntersects_tst(pts2, polys[1,]),
                      TF2 <- gIntersects_tst(polys[1,], pts2),
                      idx <- over_tst(pts2, polys[1,]),
                      codes <- point.in.polygon_tst(pts2, polys[1,]))
print(res)
## Unit: milliseconds
##                                             expr       min        lq      mean    median        uq       max neval cld
##         TF1 <- gIntersects_tst(pts2, polys[1, ]) 151.35336 165.23526 189.67848 177.62808 190.89566 365.92728   100   d
##         TF2 <- gIntersects_tst(polys[1, ], pts2) 123.26241 135.90883 157.47151 148.50073 160.37567 314.02700   100   c
##                idx <- over_tst(pts2, polys[1, ])  54.84891  63.89454  76.42608  70.66998  79.81225 240.55570   100   b
##  codes <- point.in.polygon_tst(pts2, polys[1, ])   9.39330  10.66513  14.09199  11.37738  16.88741  46.19245   100   a
ggplot2::autoplot(res) + ggtitle("square polygon: 100 evaluations")
pts2.gI1 <- pts2[TF1,]
pts2.gI2 <- pts2[TF2,]
pts2.ovr <- pts2[!is.na(idx),]
pts2.PiP <- pts2[as.logical(codes),]
# All subsets are identical
identical(pts2.gI1, pts2.gI2)
## [1] TRUE
identical(pts2.gI2, pts2.ovr)
## [1] TRUE
identical(pts2.ovr, pts2.PiP)
## [1] TRUE
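For completeness, and assuming the sp/sf conversions behave as I expect, the sf package (which also turns up in the spsample question below) can run the same test via st_intersects; a minimal sketch, not benchmarked here:
library(sf)
poly_sf <- st_as_sf(worldmapPolys[183, ])
pts_sf  <- st_as_sf(pts)
hits    <- st_intersects(pts_sf, poly_sf)   # sparse list: empty element = point outside
pts.sf  <- pts[lengths(hits) > 0, ]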
Session Info
sessionInfo()
R version 3.3.2 (2016-10-31)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows 7 x64 (build 7601) Service Pack 1
locale:
[1] LC_COLLATE=English_United States.1252 LC_CTYPE=English_United States.1252 LC_MONETARY=English_United States.1252
[4] LC_NUMERIC=C LC_TIME=English_United States.1252
attached base packages:
[1] stats graphics grDevices utils datasets methods base
other attached packages:
[1] raster_2.5-8 mapview_1.2.0 leaflet_1.1.0 maptools_0.9-2 maps_3.1.1
[6] ggplot2_2.2.1 microbenchmark_1.4-2.1 sp_1.2-4 rgeos_0.3-23

Related

Still some randomness with sp::spsample(..., ..., type='regular')

I am building sp::SpatialLines using spsample. In the docs it is written, for spsample(x, n, type, ...):
type: character; "random" for completely spatial random; "regular" for regular (systematically aligned) sampling; [...]
Yet I just realized that lines successively created with spsample and type='regular' between the very same two points are not identical:
library(sp)
set.seed(12)
for (i in 1:10) {
  p1 = c(400000, 401000)
  p2 = c(5600000, 5601000)
  l1 = as.data.frame(spsample(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a"))),
                              10000, "regular"))
  l2 = as.data.frame(spsample(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a"))),
                              10000, "regular"))
  print(all.equal(l1, l2))
}
# [1] "Component “p1”: Mean relative difference: 1.8687e-07"
# [1] "Component “p1”: Mean relative difference: 1.680998e-07"
# [1] "Component “p1”: Mean relative difference: 3.382085e-08"
# [1] "Component “p1”: Mean relative difference: 1.155756e-07"
# [1] TRUE
# [1] "Component “p1”: Mean relative difference: 1.051644e-07"
# [1] TRUE
# [1] "Component “p1”: Mean relative difference: 4.354955e-08"
# [1] "Component “p1”: Mean relative difference: 2.074916e-08"
# [1] "Component “p1”: Mean relative difference: 1.380726e-07"
I have been fighting hard in my code to understand why distance measures between (what should be) two identical points and (what should be) two identical lines did not give strictly identical results.
Any idea why this is so, and how to ensure consistent results between successive runs? (Or: any alternative way to build two identical lines in a similar spirit as above?)
That's some weird behavior. However, if you set the seed before both samples, there are no differences. Hence the discrepancy is probably due to the origin of the regular sampling varying slightly between runs.
....
set.seed(12)
l1 = as.data.frame(spsample(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a"))),
                            10000, "regular"))
set.seed(12)
l2 = as.data.frame(spsample(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a"))),
                            10000, "regular"))
....
# [1] TRUE
# [1] TRUE
# [1] TRUE
....
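Another option, if I read ?spsample correctly: the random grid origin can be pinned down with the offset argument (a single value in [0, 1] for lines) instead of resetting the seed, e.g.:
l1 = as.data.frame(spsample(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a"))),
                            10000, "regular", offset = 0.5))
l2 = as.data.frame(spsample(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a"))),
                            10000, "regular", offset = 0.5))
all.equal(l1, l2)
# expected: TRUE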
sf as an alternative to sp
As I have become a big fan of the sf package, I tested whether it has the same issue. It turns out it doesn't:
(Don't get confused: there are some conversions between sf and sp objects, in order to stick as closely as possible to the code given in the OP.)
library(sf)
library(dplyr)
library(sp)
set.seed(12)
for (i in 1:10) {
  p1 <- c(400000, 401000)
  p2 <- c(5600000, 5601000)
  l1 <- as.data.frame(
    st_as_sf(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a")))) %>%
      st_make_grid(n=100, what = "centers") %>%
      as("Spatial")
  )
  l2 <- as.data.frame(
    st_as_sf(SpatialLines(list(Lines(Line(cbind(p1, p2)), ID="a")))) %>%
      st_make_grid(n=100, what = "centers") %>%
      as("Spatial")
  )
  print(all.equal(l1, l2))
}
# [1] TRUE
# [1] TRUE
# [1] TRUE
# [1] TRUE
# [1] TRUE
# [1] TRUE
# [1] TRUE
# [1] TRUE
# [1] TRUE
# [1] TRUE
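A possibly more direct sf-native route for regular points along a line (assuming st_line_sample behaves as documented) would be:
library(sf)
ln <- st_sfc(st_linestring(rbind(c(400000, 5600000), c(401000, 5601000))))
s1 <- st_line_sample(ln, n = 10000, type = "regular")
s2 <- st_line_sample(ln, n = 10000, type = "regular")
all.equal(st_coordinates(s1), st_coordinates(s2))
# expected: TRUE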

How to sum list of list of vector in R

Suppose I have a list of lists. I would like to divide each vector of Tau by the sum over all vectors of Tau. That is,
Tau[[1]][[1]] / sum(Tau[[1]][[1]], Tau[[1]][[2]], Tau[[2]][[1]], Tau[[2]][[2]])
I would like to do this for each element of Tau. I tried Reduce but it returns an error.
tau1 <- rnorm(10,0,1)
tau2 <- rnorm(10,0,1)
tau <- list(tau1, tau2)
tau
tau3 <- rnorm(10, 0,1)
tau4 <- rnorm(10,0,1)
tau5 <- list(tau3, tau4)
tau5
Tau <- list(tau, tau5)
Tau
[[1]]
[[1]][[1]]
[1] 0.41435211 -0.28983281 0.96462705 -1.32050463 -0.15736981 0.07512305
[7] -0.73394053 -0.12630874 0.21886818 1.57760128
[[1]][[2]]
[1] -1.31643065 1.24744501 0.09073152 -1.02300779 0.63927688 -2.09642019
[7] 1.25458113 -0.21542568 -0.07314255 1.02092833
[[2]]
[[2]][[1]]
[1] 0.2582012 0.9561437 -0.8351850 0.3028827 -0.7016825 -0.6400293
[7] 0.1925083 -1.0869632 0.3688728 -0.1837725
[[2]][[2]]
[1] -2.560212660 1.953122685 0.087180131 2.252459267 -0.003317207
[6] -1.767479446 -0.298496963 0.015214568 0.300665882 -1.017860244
Reduce("+", Tau)
Error in f(init, x[[i]]) : non-numeric argument to binary operator
Any help, please?
Try:
Tau[[1]][[1]] / sum(unlist(Tau))
and, since this is a two-level list of lists:
lapply(Tau, FUN = function(x)        # dive into the first level
  lapply(x, FUN = function(x) x / sum(unlist(Tau))))
You can try a tidyverse solution
library(tidyverse)
Tau %>%
  flatten() %>%                         # removes one level of hierarchy from the list
  map(function(x) x/sum(unlist(.)))     # applies the function to each element of the list
[[1]]
[1] -0.3101120 -0.1273576 0.8624357 0.0390124 0.0715351 0.9489481 0.2550256 -0.6999603
[9] -0.3800367 -0.2465854
[[2]]
[1] 0.67728632 0.19908554 0.22174745 0.06124092 -0.30754775 0.98870176 0.27546143 -1.08813227
[9] 0.38806129 -0.26159621
[[3]]
[1] -0.59082848 -0.12060585 -0.56768982 -0.40329663 -0.34583518 -0.93324998 0.46354885 0.08486158
[9] -0.62973290 0.69373770
[[4]]
[1] 0.23596330 -0.16326350 0.49527439 0.48587260 0.45458206 0.38102570 0.30648348 -0.03425584
[9] -0.16928961 -0.21051518
Your data (I added a seed for reproducibility):
set.seed(123)
tau1 <- rnorm(10,0,1)
tau2 <- rnorm(10,0,1)
tau3 <- rnorm(10, 0,1)
tau4 <- rnorm(10,0,1)
Tau <- list(list(tau1, tau2), list(tau3, tau4))
Tau <- list(tau, tau5) makes a list with two elements, both of which are themselves lists, so the Reduce call tries to add two lists together, which isn't defined.
You need to use append to combine the elements of two lists into a single list:
Tau <- append(tau, tau5)
Reduce("+", Tau)
# [1] -0.7481876 3.2098496 1.9950819 2.8188345 1.4200328 0.2202510
# [7] 0.1448013 0.8132506 -0.7788742 0.5466227
I think you can just use unlist. From its help file:
Given a list structure x, unlist simplifies it to produce a vector which contains all the atomic components which occur in x.
sum(unlist(Tau))
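If you want to keep the nested structure rather than flattening it, base R's rapply can apply the division recursively; a small sketch:
total <- sum(unlist(Tau))
Tau_norm <- rapply(Tau, function(x) x / total, how = "replace")  # same shape as Tau
str(Tau_norm, max.level = 2)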

Mosaicking rasters from the same date in a list of rasters

I am working with R Studio 0.99.902.
I have got a list of rasters (a time series of S2 band 5 for a specific tile). The acquisitions of some dates are split into two files which I need to mosaic (they are two different areas of the same tile). This will later allow me to build the time-series stack. I would like R to automatically detect the files that were acquired on the same date and mosaic them, without me writing a mosaic call for every pair of duplicated rasters.
This is my raster list:
lst_B5
[1] "09/05/S2A_20160905T104245Z_31UFS_TOC_V100/S2A_20160905T104245Z_31UFS_TOC-B05_20M_V100.tif"
[2] "09/08/S2A_20160908T105416Z_31UFS_TOC_V100/S2A_20160908T105416Z_31UFS_TOC-B05_20M_V100.tif"
[3] "09/18/S2A_20160918T105022Z_31UFS_TOC_V100/S2A_20160918T105022Z_31UFS_TOC-B05_20M_V100.tif"
[4] "09/18/S2A_20160918T105641Z_31UFS_TOC_V100/S2A_20160918T105641Z_31UFS_TOC-B05_20M_V100.tif"
[5] "09/25/S2A_20160925T104115Z_31UFS_TOC_V100/S2A_20160925T104115Z_31UFS_TOC-B05_20M_V100.tif"
[6] "09/28/S2A_20160928T105022Z_31UFS_TOC_V100/S2A_20160928T105022Z_31UFS_TOC-B05_20M_V100.tif"
[7] "09/28/S2A_20160928T105637Z_31UFS_TOC_V100/S2A_20160928T105637Z_31UFS_TOC-B05_20M_V100.tif"
[8] "10/05/S2A_20161005T104018Z_31UFS_TOC_V100/S2A_20161005T104018Z_31UFS_TOC-B05_20M_V100.tif"
[9] "10/08/S2A_20161008T105022Z_31UFS_TOC_V100/S2A_20161008T105022Z_31UFS_TOC-B05_20M_V100.tif"
[10] "10/15/S2A_20161015T104513Z_31UFS_TOC_V100/S2A_20161015T104513Z_31UFS_TOC-B05_20M_V100.tif"
[11] "10/18/S2A_20161018T105035Z_31UFS_TOC_V100/S2A_20161018T105035Z_31UFS_TOC-B05_20M_V100.tif"
[12] "10/25/S2A_20161025T104118Z_31UFS_TOC_V100/S2A_20161025T104118Z_31UFS_TOC-B05_20M_V100.tif"
[13] "10/28/S2A_20161028T105615Z_31UFS_TOC_V100/S2A_20161028T105615Z_31UFS_TOC-B05_20M_V100.tif"
[14] "11/04/S2A_20161104T104250Z_31UFS_TOC_V100/S2A_20161104T104250Z_31UFS_TOC-B05_20M_V100.tif"
[15] "11/07/S2A_20161107T105238Z_31UFS_TOC_V100/S2A_20161107T105238Z_31UFS_TOC-B05_20M_V100.tif"
[16] "11/14/S2A_20161114T104309Z_31UFS_TOC_V100/S2A_20161114T104309Z_31UFS_TOC-B05_20M_V100.tif"
[17] "11/17/S2A_20161117T105325Z_31UFS_TOC_V100/S2A_20161117T105325Z_31UFS_TOC-B05_20M_V100.tif"
[18] "11/24/S2A_20161124T104349Z_31UFS_TOC_V100/S2A_20161124T104349Z_31UFS_TOC-B05_20M_V100.tif"
[19] "11/27/S2A_20161127T105404Z_31UFS_TOC_V100/S2A_20161127T105404Z_31UFS_TOC-B05_20M_V100.tif"
As you can see, some rasters have the same date, but not exactly the same name. In order to find duplicates, I extracted the date of each file from its name, and I assigned the dates as the names of the objects in the list.
names(lst_B5) <- dates_2
where
dates_2
[1] "20160905" "20160908" "20160918" "20160918" "20160925" "20160928" "20160928" "20161005" "20161008" "20161015"
[11] "20161018" "20161025" "20161028" "20161104" "20161107" "20161114" "20161117" "20161124" "20161127"
This allows me to find duplicates within the list:
duplicated(names(lst_B5))
[1] FALSE FALSE FALSE TRUE FALSE FALSE TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE
But I still don't know how to automatically mosaic the duplicates. Have you got any hints?
You can do it inside a loop. Using a reproducible example:
library(raster)
set.seed(123)
rlist <- list() # store all rasters
rlist[[1]] <- raster(nrows=90, ncols=180, xmn=-180, xmx=0, ymn=-90, ymx=90, vals=rnorm(16200))
rlist[[2]] <- raster(nrows=90, ncols=180, xmn=0, xmx=180, ymn=-90, ymx=90, vals=rnorm(16200))
rlist[[3]] <- raster(nrows=90, ncols=180, xmn=-180, xmx=0, ymn=-90, ymx=90, vals=rnorm(16200))
rlist[[4]] <- raster(nrows=90, ncols=180, xmn=0, xmx=180, ymn=-90, ymx=90, vals=rnorm(16200))
dates_2 <- c("20160908","20160918","20160918","20160925") # your dates
dates_2u <- unique(dates_2) # vector of unique dates
rlist2 <- list() # a second list to store the processed rasters
for(i in seq_along(dates_2u)){
  idx <- which(dates_2 %in% dates_2u[i]) # positions of the rasters sharing this date
  if(length(idx) > 1){
    rlisttemp <- rlist[idx]     # temporary list of same-date rasters
    rlisttemp$fun <- mean       # cell function for overlapping areas of the mosaic
    rlist2[[i]] <- do.call(mosaic, rlisttemp)
  }else{
    rlist2[[i]] <- rlist[[idx]] # nothing to mosaic for this date
  }
}
And check results:
plot(rlist2[[1]]);plot(rlist2[[2]]);plot(rlist2[[3]])
In your case, you can load all the rasters into a list (keeping the example's structure) with something like:
rlist <- lapply(list.files('/your/path/to/rasters',
                           pattern = '\\.tif$', full.names = TRUE,
                           recursive = TRUE), FUN = raster)
Or:
rlist <- lapply(lst_B5, FUN=raster)
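For what it's worth, the grouping can also be written without the explicit index bookkeeping by using split() on the dates; a sketch assuming lst_B5 holds the file paths and dates_2 the matching dates:
groups <- split(lst_B5, dates_2)            # one list element per unique date
rlist2 <- lapply(groups, function(f){
  rs <- lapply(f, raster::raster)
  if (length(rs) == 1) return(rs[[1]])      # single acquisition: nothing to mosaic
  rs$fun <- mean                            # cell function for overlapping areas
  do.call(raster::mosaic, rs)
})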

Spatial data and memory

I am trying to add up geotiffs but am running into memory issues. R is using all 32GB according to the following R error...
In writeValues(y, x, start = 1) :
Reached total allocation of 32710Mb: see help(memory.size)
I also checked the properties of R and it is 64 bit and the target is...
"C:\Program Files\R\R-3.3.0\bin\x64\Rgui.exe"
The version is
R.Version()
$platform
[1] "x86_64-w64-mingw32"
$arch
[1] "x86_64"
$os
[1] "mingw32"
$system
[1] "x86_64, mingw32"
$status
[1] ""
$major
[1] "3"
$minor
[1] "3.0"
$year
[1] "2016"
$month
[1] "05"
$day
[1] "03"
$`svn rev`
[1] "70573"
$language
[1] "R"
$version.string
[1] "R version 3.3.0 (2016-05-03)"
$nickname
[1] "Supposedly Educational"
So it looks like my maximum memory is being used up by R. I tried to use the bigmemory package: in the code below I tried changing the matrix to a big.matrix, but that failed, and the error occurs when trying to write the output file. Any suggestions for altering the code so that less memory is used, or for working with the ff or bigmemory packages?
############ LOOP THROUGH AGE MAPS TO COMPILE THE NUMBER OF TIMES A CELL BURNS DURING A GIVEN SPAN OF TIME ####################
## Empirical Fires
print("1 of 3: 2010-2015")
burn.mat <- matrix(0, nrow, ncol) # create a matrix of all zeros, the dimension of your landscape (row, col)
# Read in historical fire maps
for (j in 2010:2015){ # year loop
  age.tmp <- as.matrix(raster(paste('fr', j, '.tif', sep=''))) # read in age map
  burn.mat <- burn.mat + (age.tmp==1) # in the ALFRESCO empirical fire history files, AGE=1 where something has burned; (age.tmp==1) is a logical test returning a 0/1 map for FALSE/TRUE
  # Write the data to a GeoTIFF
  out <- raster(burn.mat, xmn=-1692148, xmx=1321752, ymn=490809.9, ymx=2245610, crs='+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs')
  writeRaster(out, filename=paste(outdir, '/burn.mat.hist.1950-2007.tif', sep=''), format='GTiff', options='COMPRESS=LZW', datatype='FLT4S', overwrite=T)
}
The problem will probably go away if you use Raster* objects rather than matrices. Something like
library(raster)
r <- raster('fr2010.tif')
burn.mat <- setValues(r, 0)
for (j in 2010:2015) {
  age.tmp <- raster(paste0('fr', j, '.tif'))
  burn.mat <- burn.mat + (age.tmp==1)
  # if age.tmp only has values of 0 and 1, use this instead:
  # burn.mat <- burn.mat + age.tmp
}
# write the results outside of the loop
writeRaster(burn.mat, filename=file.path(outdir, 'burn.mat.hist.1950-2007.tif'),
            options='COMPRESS=LZW', datatype='FLT4S', overwrite=TRUE)
A more direct approach without a loop
files <- paste0('fr', 2010:2015, '.tif')
s <- stack(files)
burn <- sum(s)
Or
burn <- sum(s == 1)
Or to write to a file in one step
b <- calc(s, sum, filename=file.path(outdir, 'burn.mat.hist.1950-2007.tif'), options='COMPRESS=LZW', datatype='FLT4S', overwrite=TRUE)
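If memory is still tight, the raster package can also be told to work on disk more aggressively via rasterOptions(); the option names below are real, but the values are only illustrative:
library(raster)
rasterOptions(maxmemory = 1e9, chunksize = 1e8)  # bytes; smaller values force chunked, on-disk processing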

RNetLogo error building parameter list with NLReport

I use an R script for parameter sweeping in a NetLogo model. The script opens the model correctly with NLStart and NLLoadModel. In the sweeping loop, I store the values of input parameters by building a list:
run.params <- list(
  RD = NLReport("RD?"),
  RD.unif = NLReport("RD-unif?"),
  Gini = NLReport("gini"),
  Gamma = NLReport("gamma"),
  GROUP = NLReport("GROUP?"),
  SN = NLReport("SN?"),
  Group.size = NLReport("group-size"),
  Sn.size = NLReport("sn-size"),
  W.group = NLReport("w-group"),
  W.sn = NLReport("w-sn"),
  Num.sn <- NLReport("num-sn"),
  LF <- NLReport("LF?"),
  L.memory <- NLReport("L-memory"),
  LF.agents <- NLReport("LF-agents?"),
  MASS.enthusiasm <- NLReport("MASS-enthusiasm?"),
  W.crowd <- NLReport("w-crowd")
)
The results are as follows:
$RD
[1] FALSE
...
$Sn.size
[1] 5
$W.group
[1] 0.05
$W.sn
[1] 0.05
[[11]]
[1] 2
[[12]]
[1] FALSE
[[13]]
[1] 5
[[14]]
[1] FALSE
[[15]]
[1] FALSE
[[16]]
[1] 0.01
i.e. after W.group the values are retrieved correctly (as checked by launching NetLogo outside R), but the names are missing. I don't know why this is happening. I would be grateful for any help on this.
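For what it's worth, a likely cause: entries 11 through 16 use <- instead of =. Inside a list() call, = sets the element name, while <- performs an assignment in the calling frame and passes the value on as an unnamed argument. A minimal illustration:
x <- list(a = 1, b <- 2)
names(x)
# [1] "a" ""
So writing Num.sn = NLReport("num-sn") (and likewise for the remaining entries) should restore the missing names.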
