Ackley function implementation in R with persp3d plot

x1<-as.matrix(seq(-32.768,32.768,length=100))
x2<-as.matrix(seq(-32.768,32.768,length=100))
X<-cbind(x1,x2)
y <- outer(X,X,Ackley)
Function implementation:
Ackley <- function(x1, x2){
  a <- 20
  b <- 0.2
  c <- 2*pi
  fofx1 <- -a*exp(-b*sqrt((rowSums(X^2)/100))) -
    exp(rowSums(cos((c*X)/100))) + a + exp(1)
  fofx2 <- -a*exp(-b*sqrt((rowSums(X^2)/100))) -
    exp(rowSums(cos((c*X)/100))) + a + exp(1)
  return(fofx1 + fofx2)
}
I'm getting an error like this:
Error in dim(robj) <- c(dX, dY) :
dims [product 40000] do not match the length of object [100]
I want to produce a plot like this:

The error occurs because outer() calls its function once on whole vectors of grid coordinates and expects one value per grid point (40000 here), while the posted implementation ignores its arguments and works on the matrix X, returning only 100 values. Define the function for a single (x1, x2) pair and vectorize it:
ackley <- function(x1, x2) {
  a <- 20
  b <- 0.2
  c <- 2*pi
  d <- 2
  fofx1 <- -a*exp(-b*sqrt(1/d*sum(c(x1, x2)^2))) -
    exp(sum(cos(c*c(x1, x2))/d)) + a + exp(1)
  #fofx2 <- -a*exp(-b*sqrt(sum(c(x1,x2)^2)/100)) -
  #  exp(sum(cos(c*c(x1,x2))/100)) + a + exp(1)
  return(fofx1)
}
Ackley <- Vectorize(ackley)
x1 <- seq(-32.768,32.768,length=500)
x2 <- seq(-32.768,32.768,length=500)
z <- outer(x1, x2, FUN="Ackley")
library(plotly)
plot_ly(x=~x1, y=~x2, z = ~z, type="surface") %>%
layout(scene=list(aspectratio = list(x = 1, y = 1, z = 1)))
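If you specifically want persp3d (as in the title), the same z matrix should work with rgl as well; a minimal sketch, assuming rgl is installed:
library(rgl)
persp3d(x1, x2, z, col = "steelblue",
        xlab = "x1", ylab = "x2", zlab = "Ackley")  # interactive 3D surface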

Related

R Getting numeric matrix from predict()

I have the following code:
fit_lm=lm(z~x+y)
mix <- 2
max <- 12
miy <- 2
may <- 12
griddf <- expand.grid(x = seq(mix,max, length.out = 10),
y = seq( miy,may,length.out = 10))
Prediction_data <- data.frame(griddf)
colnames(Prediction_data) <- c("x", "y")
coordinates(Prediction_data ) <- ~ x + y
terrain_lm <- predict(fit_lm, Prediction_data)
I want terrain_lm to be a numeric matrix, so that I can use
fig <- plot_ly()
fig <- fig %>% add_surface(terrain_lm)
but I get a 1d array with 100 elements.
The result of predict is a vector. You need to add it to the x and y values and then use xtabs to transform it into a matrix suitable for a surface plot.
library(plotly)
#test data
x <- runif(20, 4, 10)
y <- runif(20, 3, 6)
z <- 3*x+y +runif(20, 0, 2)
fit_lm <- lm(z~x+y)
mix <- 2
max <- 12
miy <- 2
may <- 12
griddf <- expand.grid(x = seq(mix,max, length.out = 10),
y = seq( miy,may,length.out = 10))
terrain_lm <- data.frame(griddf)
terrain_lm$z <- predict(fit_lm, terrain_lm)
fig <- plot_ly(z = ~xtabs(z ~ x + y, data = terrain_lm))
fig <- fig %>% add_surface()
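Since expand.grid varies x fastest, the same 10 x 10 matrix can presumably also be built directly with matrix(), skipping xtabs; a sketch under that assumption:
z_mat <- matrix(terrain_lm$z, nrow = 10, ncol = 10)  # column-major fill matches expand.grid order
fig <- plot_ly(z = ~z_mat) %>% add_surface()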

How can I use try catch for nls function in R

I am doing a regression for a quadratic-linear function. I have two options: nlsLM and nls2. However, for some datasets nlsLM causes problems such as "singular gradient matrix at initial parameter estimates", or runs into an infinite loop. I want to use tryCatch to deal with this issue. Can anyone help me out? Thanks everyone in advance.
Here is the full code:
# Packages needed for estimation of Ideal trajectory - nonlinear regression
#-------------------------------------------------------------------------------
library("minpack.lm")
library("nlstools")
library("nlsMicrobio")
library("stats")
library("tseries") #runs test for auto correlation
#Use NLS2
library(proto)
library(nls2)
################################################################
# Set working directory
setwd("C:/Users/Kevin Le/PycharmProjects/Pig Data Black Box - Copy")
#load dataset
load("Data/JRPData_TTC.Rdata") #load dataset created in MissingData.step
ID <- 5470
#Create a new dataframe which will store Data after ITC estimation
#Dataframe contains ITC parameters
ITC.param.pos2 <- data.frame(ANIMAL_ID=factor(),
X0=double(),
Y1=double(),
Y2=double(),
Ylast=double(),
a=double(),
b=double(),
c=double(),
d=double(),
stringsAsFactors=FALSE)
#Dataframe contains data points on the ITC
Data.remain <- data.frame(ANIMAL_ID=character(),
Age=double(),
obs.CFI=double(),
tt=double(),
ttt=double(),
stringsAsFactors=FALSE)
#===============================================================
# For loop for automatically estimating ITC of all pigs
#===============================================================
IDC <- seq_along(ID) # 17, 23, 52, 57, 116
for (idc in IDC){
# idc = 1
i <- ID[idc]
Data <- No.NA.Data.1[No.NA.Data.1$ANIMAL_ID == i,]
idc1 <- unique(as.numeric(Data$idc.1))
####### Create data frame of x (Age) and y (CFI) ########
x <- as.numeric(Data$Age.plot)
Y <- as.numeric(Data$CFI.plot)
Z <- as.numeric(Data$DFI.plot)
Data.xy <- as.data.frame(cbind(x,Y))
#Initial parameters for parameter estimation
X0.0 <- x[1]
Xlast <- x[length(x)]
##################################################################
# 1. reparametrization CFI at X0 = 0
#function used for reparametrization in MAPLE
# solve({
# 0=a+b*X_0+c*X_0**2,
# DFIs=b+2*c*Xs,CFIs=a+b*Xs+c*Xs**2},
# {a,b,c});
# a = -X0*(2*CFIs*Xs-CFIs*X0-Xs^2*DFIs+Xs*DFIs*X0)/(Xs^2-2*X0*Xs+X0^2)
# b = (-Xs^2*DFIs+DFIs*X0^2+2*CFIs*Xs)/(Xs^2-2*X0*Xs+X0^2)
# c = -(CFIs-Xs*DFIs+X0*DFIs)/(Xs^2-2*X0*Xs+X0^2)
# 2. with the source of the function abcd and pred
##################################################################
#Provide set of initial parameters
Xs.1 <- round(seq(X0.0 + 1, Xlast - 1, len = 30), digits = 0)
X0.1 <- rep(X0.0, length(Xs.1))
DFIs.1 <- NULL
CFIs.1 <- NULL
for(A in seq_along(Xs.1)){
DFIs2 <- Data[Data$Age.plot == Xs.1[A],]$DFI.plot
CFIs2 <- Data[Data$Age.plot == Xs.1[A],]$CFI.plot
DFIs.1 <- c(DFIs.1, DFIs2)
CFIs.1 <- c(CFIs.1, CFIs2)
}
st1 <- data.frame(cbind(X0.1, Xs.1, DFIs.1, CFIs.1))
names(st1) <- c("X0","Xs", "DFIs","CFIs")
#RUN NLS2 to find optimal initial parameters
st2 <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
start = st1,
# weights = weight,
# trace = T,
algorithm = "brute-force")
par_init <- coef(st2); par_init
#--------------------------------------------
# Create empty lists to store data after loop
#--------------------------------------------
par <- list()
AC.res <- list()
AC.pvalue <- NULL
data2 <- list()
data3 <- list()
param <- data.frame(rbind(par_init))
par.abcd <- data.frame(rbind(abcd.2(as.vector(par_init))))
param.2 <- data.frame(X0=double(),
Xs=double(),
DFIs=double(),
CFIs=double(),
a=double(),
b=double(),
c=double(),
stringsAsFactors=FALSE)
j <- 2
AC_pvalue <- 0
AC.pvalue[1] <- AC_pvalue
datapointsleft <- as.numeric(dim(Data)[1])
dpl <- datapointsleft #vector of all datapointsleft at each step
#-------------------------------------------------------------------------------
# Start the procedure of Non Linear Regression
#-------------------------------------------------------------------------------
while ((AC_pvalue<=0.05) && datapointsleft >= 20){
weight <- 1/Y^2
# ---------------- NON linear reg applied to log(Y) ---------------------------------
st2 <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
start = st1,
weights = weight,
trace = F,
algorithm = "brute-force")
par_init <- coef(st2)
par_init
# st1 <- st1[!(st1$Xs == par_init[2]),]
nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
start = list(X0 = par_init[1], Xs = par_init[2],
DFIs = par_init[3], CFIs = par_init[4]),
weights = weight,
algorithm = "port",
lower = c(-10000,X0.0+1, -10000, -10000),
upper = c(10000, Xlast-1, 10000, 10000),
trace = F)
# nls.CFI <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
# Data.xy,
# start = list(X0 = par_init[1], Xs = par_init[2],
# DFIs = par_init[3], CFIs = par_init[4]),
# weights = weight,
# control = nls.control(warnOnly = TRUE),
# trace = T,
# algorithm = "port",
# lower = c(-100000000,X0.0+1, -1000000000, -1000000000),
# upper = c(1000000000, Xlast-1, 1000000000, 1000000000))
# nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
# Data.xy,
# control = nls.control(warnOnly = TRUE),
# start = list(X0 = par_init[1], Xs = par_init[2],
# DFIs = par_init[3], CFIs = par_init[4]),
# weights = weight,
# algorithm = "port",
# lower = c(-1000000000,X0.0+1, -1000000000, -1000000000),
# upper = c(1000000000, Xlast-1, 1000000000, 1000000000),
# trace = F)
#--------RESULTS analysis GOODNESS of fit
#estimate params
par[[j]] <- coef(nls.CFI)
par.abcd[j,] <- abcd.2(as.vector(coef(nls.CFI) )) #calculation of a, b, c and d
param[j,] <- par[[j]]
param.2[j-1,] <- cbind(param[j,], par.abcd[j,])
#summary
# summ = overview((nls.CFI)) #summary
#residuals
res1 <- nlsResiduals(nls.CFI) #residuals
res2 <- nlsResiduals(nls.CFI)$resi1
res <- res2[, 2]
AC.res <- test.nlsResiduals(res1)
AC.pvalue[j] <- AC.res$p.value
#---------Check for negative residuals----------
#Add filtration step order to data
Step <- rep(j - 1, length(x))
#create a new dataset with predicted CFI included
Data.new <- data.frame(cbind(x, Z, Y, pred.func.2(par[[j]],x)[[1]], res, Step))
names(Data.new) <- c("Age", "Observed_DFI","Observed_CFI", "Predicted_CFI", "Residual", "Step")
# plot(Data.new$Age, Data.new$Predicted_CFI, type = "l", col = "black",lwd = 2,
# ylim = c(0, max(Data.new$Predicted_CFI, Data.new$Observed_CFI)))
# lines(Data.new$Age, Data.new$Observed_CFI, type = "p", cex = 1.5)
#
#remove negative res
Data.pos <- Data.new[!Data.new$Residual<0,]
# lines(Data.pos$Age, Data.pos$Predicted_CFI, type = "l", col = j-1, lwd = 2)
# lines(Data.pos$Age, Data.pos$Observed_CFI, type = "p", col = j, cex = 1.5)
#restart
#Criteria to stop the loop when the estimated parameters are equal to initial parameters
# Crite <- sum(param.2[dim(param.2)[1],c(1:4)] == par_init)
datapointsleft <- as.numeric(dim(Data.pos)[1])
par_init <- par[[j]]
AC_pvalue <- AC.pvalue[j]
j <- j+1
x <- Data.pos$Age
Y <- Data.pos$Observed_CFI
Z <- Data.pos$Observed_DFI
Data.xy <- as.data.frame(cbind(x,Y))
dpl <- c(dpl, datapointsleft)
dpl
#Create again the grid
X0.0 <- x[1]
Xlast <- x[length(x)]
#Xs
if(par_init[2] -15 <= X0.0){
Xs.1 <- round(seq(X0.0 + 5, Xlast - 5, len = 30), digits = 0)
} else if(par_init[2] + 5 >= Xlast){
Xs.1 <- round(seq(par_init[2]-10, par_init[2]-1, len = 6), digits = 0)
} else{
Xs.1 <- round(seq(par_init[2]-5, par_init[2] + 5, len = 6), digits = 0)
}
#
X0.1 <- rep(X0.0, length(Xs.1))
DFIs.1 <- NULL
CFIs.1 <- NULL
for(A in seq_along(Xs.1)){
DFIs2 <- Data[Data$Age.plot == Xs.1[A],]$DFI.plot
CFIs2 <- Data[Data$Age.plot == Xs.1[A],]$CFI.plot
DFIs.1 <- c(DFIs.1, DFIs2)
CFIs.1 <- c(CFIs.1, CFIs2)
}
st1 <- data.frame(cbind(X0.1, Xs.1, DFIs.1, CFIs.1))
if(X0.0 <= par_init[2] && Xlast >=par_init[2]){
st1 <- rbind(st1, par_init)
}
names(st1) <- c("X0","Xs", "DFIs","CFIs")
}
} # end FOR loop
Here is the data file; I have exported my data to .Rdata for easier import: https://drive.google.com/file/d/1GVMarNKWMEyz-noSp1dhzKQNtu2uPS3R/view?usp=sharing
In this file, the set with id 5470 produces the error "singular gradient matrix at initial parameter estimates" in this part:
nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
start = list(X0 = par_init[1], Xs = par_init[2],
DFIs = par_init[3], CFIs = par_init[4]),
weights = weight,
algorithm = "port",
lower = c(-10000,X0.0+1, -10000, -10000),
upper = c(10000, Xlast-1, 10000, 10000),
trace = F)
The complementary functions (file Function.R):
abcd.2 <- function(P){
X0 <- P[1]
Xs <- P[2]
DFIs <- P[3]
CFIs <- P[4]
a <- -X0*(2*CFIs*Xs-CFIs*X0-Xs^2*DFIs+Xs*DFIs*X0)/(Xs^2-2*X0*Xs+X0^2)
b <- (-Xs^2*DFIs+DFIs*X0^2+2*CFIs*Xs)/(Xs^2-2*X0*Xs+X0^2)
c <- -(CFIs-Xs*DFIs+X0*DFIs)/(Xs^2-2*X0*Xs+X0^2)
pp <- as.vector(c(a, b, c))
return(pp)
}
#--------------------------------------------------------------
# NLS function
#--------------------------------------------------------------
nls.func.2 <- function(X0, Xs, DFIs, CFIs){
pp <- c(X0, Xs, DFIs, CFIs)
#calculation of a, b and c using these new parameters
c <- abcd.2(pp)[3]
b <- abcd.2(pp)[2]
a <- abcd.2(pp)[1]
ind1 <- as.numeric(x < Xs)
return (ind1*(a+b*x+c*x^2)+(1-ind1)*((a+b*(Xs)+c*(Xs)^2)+(b+2*c*(Xs))*(x-(Xs))))
}
#--------------------------------------------------------------
# Fit new parameters to a quadratic-linear function of CFI
#--------------------------------------------------------------
pred.func.2 <- function(pr,age){
#
X0 <- pr[1]
Xs <- pr[2]
DFIs <- pr[3]
CFIs <- pr[4]
#
x <- age
#calculation of a, b and c using these new parameters
c <- abcd.2(pr)[3]
b <- abcd.2(pr)[2]
a <- abcd.2(pr)[1]
#
ind1 <- as.numeric(x < Xs)
#
results <- list()
cfi <- ind1*(a+b*x+c*x^2)+(1-ind1)*((a+b*(Xs)+c*(Xs)^2)+(b+2*c*(Xs))*(x-(Xs))) #CFI
dfi <- ind1*(b+2*c*x) + (1 - ind1)*(b+2*c*(Xs)) #DFI
results[[1]] <- cfi
results[[2]] <- dfi
return (results)
}
#---------------------------------------------------------------------------------------------------------------
# Quadratic-linear function of CFI curve and its 1st derivative (DFI) with original parameters (only a, b and c)
#---------------------------------------------------------------------------------------------------------------
pred.abcd.2 <- function(pr,age){
#
a <- pr[1]
b <- pr[2]
c <- pr[3]
x <- age
#calculation of a, b and c using these new parameters
#
ind1 <- as.numeric(x < Xs)
#
results <- list()
cfi <- ind1*(a+b*x+c*x^2)+(1-ind1)*((a+b*(Xs)+c*(Xs)^2)+(b+2*c*(Xs))*(x-(Xs))) #CFI
dfi <- ind1*(b+2*c*x) + (1 - ind1)*(b+2*c*(Xs)) #DFI
results[[1]] <- cfi
results[[2]] <- dfi
return (results)
}
Update: I reviewed my logic from the previous step and found that it had slightly messed up my data; I have fixed that. The case where a set of data ran into an infinite loop no longer exists, but the error "singular gradient matrix at initial parameter estimates" is still there.
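For the tryCatch part of the question: a minimal sketch (assuming it is acceptable to abandon the fit for an ID when nlsLM fails) wraps the nlsLM call and bails out of the while loop on error:
nls.CFI <- tryCatch(
  nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
        Data.xy,
        control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
        start = list(X0 = par_init[1], Xs = par_init[2],
                     DFIs = par_init[3], CFIs = par_init[4]),
        weights = weight,
        algorithm = "port",
        lower = c(-10000, X0.0 + 1, -10000, -10000),
        upper = c(10000, Xlast - 1, 10000, 10000),
        trace = F),
  error = function(e) {
    message("nlsLM failed for ID ", i, ": ", conditionMessage(e))
    NULL  # sentinel value checked below
  })
if (is.null(nls.CFI)) break  # leave the while loop; the for loop moves on to the next ID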

mdply (or similar) a function instead of for loop

I want to generate data from a function iterating over a range of values. The setting is best explained in a small example:
myfun <- function(a, b, sims) {
x = 3/a*b
y = mean(a*rnorm(sims))
return(data.frame(x = x, y = y))
}
# Output I want:
d <- data.frame(x = 0, y= 0)
d[1,] <- myfun(a=4, b=2, sims = 100)
d[2,] <- myfun(a=4, b=3, sims = 100)
d[3,] <- myfun(a=4, b=4, sims = 100)
# --> With a for loop this is easy
# Using mdply, however, does not work
a <- expand.grid(a=1:3)
d <- plyr::mdply(a, myfun, b=seq(1,100, length=100), sims = 100)
You can use Map:
data <- expand.grid(a = 1:3, b = 1:100)
result <- do.call(rbind, Map(myfun, data$a, data$b, MoreArgs = list(sims = 100)))
head(result)
# x y
#1 3.0 -0.17846248
#2 1.5 0.06837716
#3 1.0 0.01034184
#4 6.0 -0.02898619
#5 3.0 0.10077290
#6 2.0 0.22321839
A similar way is to Vectorize myfun; Vectorize is a wrapper around mapply.
myfun_vec <- Vectorize(myfun)
t(myfun_vec(data$a, data$b, 100))
A purrr option:
result <- purrr::map2_df(data$a, data$b, myfun, sims = 100)
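For what it's worth, the original mdply attempt likely fails only because b is passed as a separate 100-element vector instead of as a column of the grid; putting both parameters in the data frame should make mdply itself work (a sketch, assuming plyr is available):
params <- expand.grid(a = 1:3, b = 1:100)
d <- plyr::mdply(params, myfun, sims = 100)  # one row per (a, b) pair, with x and y columns appended
head(d)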

R-caret-plyr : how to modify downSample function to create sampled data of different proportions

Below is the downSample function of caret that I found here.
downSample <- function(x, y, list = FALSE, yname = "Class")
{
xc <- class(x)
if(!is.data.frame(x)) x <- as.data.frame(x)
if(!is.factor(y))
{
warning("Down-sampling requires a factor variable as the response. The original data was returned.")
return(list(x = x, y = y))
}
minClass <- min(table(y))
x$.outcome <- y
x <- ddply(x, .(y),
function(dat, n) dat[sample(seq(along = dat$.outcome), n),,drop = FALSE],
n = minClass)
y <- x$.outcome
x <- x[, !(colnames(x) %in% c("y", ".outcome")), drop = FALSE]
if(list)
{
if(xc[1] == "matrix") x <- as.matrix(x)
out <- list(x = x, y = y)
} else {
out <- cbind(x, y)
colnames(out)[ncol(out)] <- yname
}
out
}
Suppose that my data set is iris:
data(iris)
x <- iris[, -5]
y <- iris[, 5]
To make the response variable a hugely unbalanced binary one:
y[-c(130, 146)] <- "setosa"
There are now two instances of "virginica" and 148 instances of "setosa". I would like to modify downSample so that, instead of returning a subsampled data set with 50% of minClass, it returns one with, for instance, 30% (k) of the minor class and 70% of the major class. For n instances in minClass, downSample selects n instances of the other class to get a fully balanced data set, but in my case that loses a lot of data, so I want to balance it only partially.
Let's suppose that k = 20%, i.e. in the end I want 20% of minClass and 80% of the other class. I have already tried to modify this part of the function:
x <- ddply(x, .(y), function(dat, n)
dat[sample(seq(along = dat$.outcome), n),, drop = FALSE], n = minClass)
by changing n to 4*n, but I did not achieve it. There is this error:
Error in size <= n/2 :
comparison (4) is possible only for atomic and list types
Your help would be appreciated.
A simple way to perform this is to change the n = minClass part of the ddply call.
downSample_custom <- function(x, y, list = FALSE, yname = "Class", frac = 1){ #add argument frac which is in the 0 - 1 range
xc <- class(x)
if(!is.data.frame(x)) x <- as.data.frame(x)
if(!is.factor(y))
{
warning("Down-sampling requires a factor variable as the response. The original data was returned.")
return(list(x = x, y = y))
}
minClass <- min(table(y))
x$.outcome <- y
x <- ddply(x, .(y),
function(dat, n) dat[sample(seq(along = dat$.outcome), n),,drop = FALSE],
n = minClass*frac) #change the n to this
y <- x$.outcome
x <- x[, !(colnames(x) %in% c("y", ".outcome")), drop = FALSE]
if(list)
{
if(xc[1] == "matrix") x <- as.matrix(x)
out <- list(x = x, y = y)
} else {
out <- cbind(x, y)
colnames(out)[ncol(out)] <- yname
}
out
}
Does it work?
library(plyr)
An imbalanced y:
set.seed(1)
y <- as.factor(sample(c("M", "F"),
prob = c(0.1, 0.9),
size = 10000,
replace = TRUE))
x <- rnorm(10000)
table(downSample_custom(x, y)[,2])
output:
F M
1044 1044
table(downSample_custom(x, y, frac = 0.5)[,2])
output:
F M
522 522
table(downSample_custom(x, y, frac = 0.2)[,2])
output:
F M
208 208
using frac > 1 returns an error:
downSample_custom(x, y, frac = 2)
output:
Error in sample.int(length(x), size, replace, prob) :
cannot take a sample larger than the population when 'replace = FALSE'
EDIT: answer to the updated question.
This can be achieved, for instance, by sampling the indexes of each class separately. Here is an example that works only for two-class problems:
downSample_custom <- function(x, y, yname = "Class", frac = 1){
lev <- levels(y)
minClass <- min(table(y))
lev_min <- levels(y)[which.min(table(y))]
inds_down <- sample(which(y == lev[lev != lev_min]), size = minClass * frac) #sample the indexes of the more abundant class according to minClass * frac
inds_minClass <- which(y == lev[lev == lev_min]) #take all the indexes of the lesser abundant class
out <- data.frame(x, y)
out <- out[sort(c(inds_down, inds_minClass)),]
colnames(out)[ncol(out)] <- yname
return(out)
}
how it looks in practice:
table(downSample_custom(x, y)[,2])
output:
F M
1044 1044
table(downSample_custom(x, y, frac = 5)[,2])
output:
F M
5220 1044
head(downSample_custom(x, y, frac = 5))
output:
x Class
1 -1.5163733 F
2 0.6291412 F
4 1.1797811 M
5 1.1176545 F
6 -1.2377359 F
7 -1.2301645 M
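A quick note on choosing frac for a target share: the minor class contributes minClass rows and the major class minClass * frac rows, so the minor-class share is 1/(1 + frac), i.e. frac = (1 - k)/k. For the 20% target from the question that gives frac = 4:
table(downSample_custom(x, y, frac = 4)[,2])
#    F    M
# 4176 1044    # 1044/5220 = 20% minor class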

Vectorized approach for the following task

The following code works, but as expected, it takes ages to execute for large vectors.
What would be the vectorised way to accomplish the same task:
x <- seq(0,10,0.01)
y <- seq(0,10,0.01)
df <- data.frame(vector1 = rnorm(10000), vector2 = rnorm(10000), vector3 = rnorm(10000))
m.out <- matrix(nrow=length(x),ncol = length(y))
a <- df$vector1
b <- df$vector2
c <- df$vector3
for (i in 1:length(x)){
for(j in 1:length(y)){
m.out[i,j] <- cor((x[i]*a + y[j]*b),c,use="complete.obs",method = "pearson")
}
}
Thanks,
Please see the vectorized version below; you can use mapply and expand.grid. To return to the wide format you can use dcast from the reshape2 package (it still takes some time, though):
set.seed(123)
x <- seq(0, 10, 0.01)
y <- seq(0, 10, 0.01)
# simulation
df <- data.frame(vector1 = rnorm(length(x)), vector2 = rnorm(length(x)), vector3 = rnorm(length(x)))
a <- df$vector1
b <- df$vector2
c <- df$vector3
v <- expand.grid(x, y)
v$out <- mapply(function(n, m) cor(n * a + m * b, c, use = "complete.obs", method = "pearson"), v[, 1], v[, 2])
library(reshape2)
z <- dcast(v, Var1 ~ Var2)
rownames(z) <- z$Var1
z <- z[, -1]
head(z[, 1:5])
Output:
0 0.01 0.02 0.03 0.04
0 NA 0.0140699293 0.0140699293 0.0140699293 0.0140699293
0.01 -0.01383734 0.0003350528 0.0065542508 0.0090938390 0.0103897953
0.02 -0.01383734 -0.0059841841 0.0003350528 0.0042062076 0.0065542508
0.03 -0.01383734 -0.0086178379 -0.0035752709 0.0003350528 0.0031310581
0.04 -0.01383734 -0.0099713568 -0.0059841841 -0.0024814273 0.0003350528
0.05 -0.01383734 -0.0107798236 -0.0075458061 -0.0045052606 -0.0018627055
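Because mapply still loops in R, a further speedup is possible by expanding the correlation analytically using the bilinearity of covariance; a sketch, assuming a, b and c contain no NAs (so use = "complete.obs" has no effect):
# cor(x*a + y*b, c) = (x*cov(a,c) + y*cov(b,c)) /
#                     (sqrt(x^2*var(a) + y^2*var(b) + 2*x*y*cov(a,b)) * sd(c))
ca <- cov(a, c); cb <- cov(b, c); cab <- cov(a, b)
va <- var(a); vb <- var(b); sc <- sd(c)
num <- outer(x * ca, y * cb, `+`)
den <- sqrt(outer(x^2 * va, y^2 * vb, `+`) + 2 * cab * outer(x, y)) * sc
m.vec <- num / den  # the (0, 0) cell is NaN, since the correlation is undefined there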
