How to reduce sparsity when selecting features in text mining - R

I am trying to build a sentiment analysis of customer reviews using R.
Here is the R code:
train.index <- 1:50000
test.index <- 50001:nrow(Data_clean)
#clean up
# remove grammar/punctuation
Data_clean$Raison.Reco.clean <- tolower(gsub('[[:punct:]0-9]', '', Data_clean$Raison.Reco))
train <- Data_clean[train.index, ]
test <- Data_clean[test.index, ]
word.freq <- function(document.vector, sparsity = .999)
{
  # note: the sparsity argument is currently not used inside the function
  temp.tf <- document.vector %>%
    tokens(ngrams = 1:3) %>% # generate tokens (unigrams to trigrams)
    dfm()                    # generate dfm
  # total frequency of each feature across all documents
  freq.df <- colSums(temp.tf)
  freq.df <- data.frame(word = names(freq.df), freq = freq.df)
  rownames(freq.df) <- NULL
  return(freq.df)
}
word.freq.pos <- word.freq(train$Raison.Reco.clean[train$Note.Reco == "10"])
word.freq.neg <- word.freq(train$Raison.Reco.clean[train$Note.Reco == "0"])
# merge the two tables
freq.all <- merge(word.freq.pos, word.freq.neg, by ='word', all = T )
# clean up
freq.all$freq.x[is.na(freq.all$freq.x)] <- 0
freq.all$freq.y[is.na(freq.all$freq.y)] <- 0
# compute the difference
freq.all$diff <- abs(freq.all$freq.x - freq.all$freq.y)
head(freq.all[order(-freq.all$diff), ])
#NDSI
#smoothing term
alpha <- 30
#ndsi
freq.all$ndsi <- abs(freq.all$freq.x - freq.all$freq.y) / (freq.all$freq.x + freq.all$freq.y + 2*alpha)
head(freq.all[order(-freq.all$ndsi), ])
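# For intuition on the smoothing term: with alpha = 30, a word seen 100 times in the
# positive reviews and never in the negative ones gets ndsi = 100 / (100 + 60) ≈ 0.63,
# while a word seen only 3 times gets 3 / (3 + 60) ≈ 0.05, so rare words are pushed down.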
# number of features to keep in the matrix
num.features <- 500
#sort the values
freq.all <- freq.all[order(-freq.all$ndsi), ]
############################################## tf-idf ###################################################################################
#cast to a string
freq.all$word <- as.character(freq.all$word)
#construct the tf matrix
tf <- t(apply(t(train$Raison.Reco.clean), 2, function(x) str_count(x, freq.all$word[1:num.features])))
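# idf = log(number of training documents / number of training documents containing the term);
# terms that never occur in the training rows would give Inf, which the next line resets to 0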
idf <- log(length(train.index)/colSums(sign(tf[train.index, ])))
idf[is.infinite(idf)] <- 0
tf.idf <- as.data.frame(t(apply(tf, 1, function(x) x * idf)))
colnames(tf.idf) <- freq.all$word[1:num.features]
##################################################################### Random forest #############################################
# train random forest classifier
ndsi.forest <- randomForest(tf.idf[train.index, ], as.factor(train$Note.Reco[train.index]), ntree = 100)
My question is: is there any way to eliminate the stop words, or to make the tf-idf matrix less sparse?
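For example, would something along these lines be a sensible direction? (Just a sketch, not tested on my data: it assumes the quanteda stop word lists, and I use the French list here since the reviews appear to be in French; dfm_trim() is meant to drop rare features so that the matrix becomes less sparse.)
library(quanteda)
toks <- tokens(train$Raison.Reco.clean) %>%
  tokens_remove(stopwords("fr")) %>%   # drop stop words before forming n-grams
  tokens_ngrams(n = 1:3)
dtm <- dfm(toks)
# keep only features occurring in at least 10 documents to reduce sparsity
dtm <- dfm_trim(dtm, min_docfreq = 10)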
Thank you in advance

Related

Cannot make sense of the error while using OptimParallel in R

I'm trying to run the function below using optimParallel in R on a certain data set. The code is as follows:
install.packages("optimParallel")
install.packages('parallel')
library(parallel)
library(optimParallel)
library(doParallel)
library(data.table)
library(Rlab)
library(HDInterval)
library(mvtnorm)
library(matrixStats)
library(dplyr)
library(cold)
## Bolus data:
data("bolus")
d1 <- bolus
d1$group <- ifelse(d1$group == "2mg",1,0)
colnames(d1) <- c("index",'group',"time","y")
d2 <- d1 %>% select(index, y, group, time)
colnames(d2) <- c('index','y','x1','x2') ### Final data
## Modification of the objective function:
## Another approach:
dpd_poi <- function(x, fixed = c(rep(FALSE, 5))){
  params <- fixed
  dpd_1 <- function(p){
    params[!fixed] <- p
    alpha <- params[1]
    beta_0 <- params[2]
    beta_1 <- params[3]
    beta_2 <- params[4]
    rho <- params[5]
    add_pi <- function(d){
      k <- beta_0 + (d[3] * beta_1) + (d[4] * beta_2)
      k1 <- exp(k) ## for Poisson regression
      d <- cbind(d, k1)
    }
    dat_split <- split(x, f = x$index)
    result <- lapply(dat_split, add_pi)
    result <- rbindlist(result)
    result <- as.data.frame(result)
    colnames(result) <- c('index','y','x1','x2','lamb')
    result_split <- split(result, f = result$index)
    expression <- function(d){
      bin <- as.data.frame(combn(d$y, 2))
      pr <- as.data.frame(combn(d$lamb, 2))
      ## Evaluation of the probabilities:
      f_jk <- function(u, v){
        dummy_func <- function(x, y){
          ppois(x, lambda = y)
        }
        dummy_func_1 <- function(x, y){
          ppois(x - 1, lambda = y)
        }
        k <- mapply(dummy_func, u, v)
        k_1 <- mapply(dummy_func_1, u, v)
        inv1 <- inverseCDF(as.matrix(k), pnorm)
        inv2 <- inverseCDF(as.matrix(k_1), pnorm)
        mean <- rep(0, 2)
        lower <- inv2
        upper <- inv1
        corr <- diag(2)
        corr[lower.tri(corr)] <- rho
        corr[upper.tri(corr)] <- rho
        prob <- pmvnorm(lower = lower, upper = upper, mean = mean, corr = corr)
        prob <- (1 + (1/alpha)) * (prob^alpha)
        ## First expression: (changes for Poisson regression)
        lam <- as.vector(t(v))
        v1 <- rpois(1000, lambda = lam[1])
        v2 <- rpois(1000, lambda = lam[2])
        all_possib <- as.data.frame(rbind(v1, v2))
        new_func <- function(u){
          k <- mapply(dummy_func, u, v)
          k_1 <- mapply(dummy_func_1, u, v)
          inv1_1 <- inverseCDF(as.matrix(k), pnorm)
          inv2_1 <- inverseCDF(as.matrix(k_1), pnorm)
          mean1 <- rep(0, 2)
          lower1 <- inv2_1
          upper1 <- inv1_1
          corr1 <- diag(2)
          corr1[lower.tri(corr1)] <- rho
          corr1[upper.tri(corr1)] <- rho
          prob1 <- pmvnorm(lower = lower1, upper = upper1, mean = mean1, corr = corr1)
          prob1 <- prob1^(alpha)
        }
        val <- apply(all_possib, 2, new_func)
        val_s <- mean(val) ## approximation
        return(val_s - prob)
      }
      final_res <- mapply(f_jk, bin, pr)
      final_value <- sum(final_res)
    }
    u <- sapply(result_split, expression)
    return(sum(u))
  }
}
## run the objective function:
cl <- makeCluster(25)
setDefaultCluster(cl=cl)
clusterExport(cl,c('d2','val'))
clusterEvalQ(cl,c(library(data.table), library(Rlab),library(HDInterval),library(mvtnorm),library(matrixStats),library(dplyr),library(cold)))
val <- dpd_poi(d2, c(0.5,FALSE,FALSE,FALSE,FALSE))
optimParallel(par = c(beta_0 =1, beta_1 =0.1 ,beta_2 = 1,rho=0.2),fn = val ,method = "L-BFGS-B",lower = c(-10,-10,-10,0),upper = c(Inf,Inf,Inf,1))
stopCluster(cl)
After running for some time, it returns the following error:
checkForRemoteErrors(val)
9 nodes produced errors; first error: missing value where TRUE/FALSE needed
However, when I make a minor change in the objective function (drawing 2 random numbers from rpois instead of 1000) and run the same code using optim, it converges and gives me a proper result. This is a Monte Carlo simulation, and it does not make sense to draw so few Poisson variates. I have to use optimParallel, otherwise it takes far too long to converge. I could also run this code on simulated data.
I'm unable to figure out where the issue lies. I'd appreciate any help in this regard.
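For reference, a minimal optimParallel call of the following form is the usage pattern I am trying to follow (a toy quadratic objective, just to illustrate the setup; nothing to do with my actual model):
library(parallel)
library(optimParallel)
cl <- makeCluster(2)
setDefaultCluster(cl = cl)
toy_fn <- function(p) sum((p - 3)^2)  # toy objective, minimum at (3, 3)
optimParallel(par = c(0, 0), fn = toy_fn, lower = c(-10, -10), upper = c(10, 10))
stopCluster(cl)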

Classification in high-dimensional data by extreme learning machine in R

When I simulate 50 high-dimensional data sets from a multivariate normal distribution and classify them with ELM, the AUC scores I get differ greatly from loop to loop. In some loops the AUC drops below 50%. Where am I going wrong? I want the deviation in the scores to be low across loops. How can I achieve stable, high scores in each loop? Should I change the data generation? I look forward to your valuable contributions.
install.packages("MASS")
install.packages("stats")
install.packages("pROC")
install.packages("elmNNRcpp")
library(MASS)
library(stats)
library(pROC)
library(elmNNRcpp)
######################################################
# DATA SIMULATE FUNCTION
######################################################
# rm(list = ls())
generateData <- function(n, p) {
  pr <- seq(0.80, 0.40, length.out = p)
  pr[1] <- 1
  covmat <- toeplitz(pr)
  mu <- rep(0, p)
  X_ <- data.frame(mvrnorm(n, mu = mu, Sigma = covmat))
  X <- unname(as.matrix(sample(X_)))
  vCoef <- rnorm(ncol(X))
  vProb <- exp(X %*% vCoef) / (1 + exp(X %*% vCoef))
  Y <- rbinom(nrow(X), 1, vProb)
  mydata <- data.frame(cbind(X, Y))
  return(mydata)
}
######################################################
# SIMULATED DATA
######################################################
n <- 100
p <- 120
nsim <- 50
set.seed(123)
mydata <- list()
for (k in 1:nsim) {
  data <- generateData(n, p)
  # table(data[ncol(data)])
  X <- data[-ncol(data)]
  Y <- data[ncol(data)]
  mydata[[k]] <- data
}
######################################################
# ELM CLASSIFICATION
######################################################
######################################################
# ELM CLASSIFICATION FUNCTION
######################################################
fELMCLASS <- function(x, col_names) {
  trainIndex <- sample(1:nrow(x), size = 0.7 * nrow(x))
  trainSet <- x[trainIndex, ]
  testSet <- x[-trainIndex, ]
  xtrain <- as.matrix(trainSet[, 1:(length(trainSet) - 1)])
  ytrain <- as.matrix(trainSet[, length(trainSet)])
  xtest <- as.matrix(testSet[, 1:(length(testSet) - 1)])
  ytest <- as.matrix(testSet[, length(testSet)])
  model <- elm_train(xtrain, ytrain, nhid = 25, actfun = 'relu')
  pred.class <- elm_predict(model, xtest, normalize = TRUE)
  roc.model <- roc(as.factor(ytest) ~ as.numeric(pred.class), direction = c("auto"))
  performance_metrics <- t(data.frame("AUC" = roc.model$auc))
  return(performance_metrics)
}
######################################################
######################################################
# SAVE RESULTS
######################################################
datalist <- data.frame()
for (j in 1:nsim) {
  data_ELM <- as.data.frame(mydata[[j]])
  datalist <- rbind(datalist, fELMCLASS(data_ELM, "data"))
}
datalist
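For what it's worth, the spread across the 50 runs can be summarised along these lines (a small sketch, assuming datalist holds one AUC value per simulated data set, as built above):
aucs <- as.numeric(datalist[, 1])
mean(aucs)   # average AUC over the 50 simulations
sd(aucs)     # how unstable the classifier is from run to run
hist(aucs, main = "AUC across 50 simulations", xlab = "AUC")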

Neural Networks in RStudio

I am trying to predict the variable price_range, which has 4 levels (0, 1, 2, 3), with a neural network.
But the table with the results has only 2 rows instead of 4.
I don't know why the table is not 4x4.
idx only contains ones and fours, and pred only contains 0 and 3.
Sometimes the table has 3 rows, but never 4, and most of the time it has only 2.
Please help.
Thank you!
#install.packages("rJava")
library(rJava)
#install.packages("xlsx")
library(xlsx)
#Read file of dataToModelset
setwd("C:/Users/Utilizador/Desktop/AE")
dataread <- read.xlsx("datasetDeleteOutliers.xlsx",1)
dataread <-datasetDeleteOutliers
dataread <-datasetCorrelation
dataread <-datasetMedian
dataread <-datasetNearestNeighbor
#
data <- data.frame(dataread)
str(data)
data$clock_speed=as.numeric(data$clock_speed)
data$m_dep=as.numeric(data$m_dep)
str(data)
# Search for binary variables
is_binary <- function(x) length(unique(x)) == 2
sapply(data, is_binary)
summary(data) # to see a summary of the characteristics of the data
#data normalization
#z_score_function <- function(y) {(y - mean(y, na.rm=TRUE))/(sd(y,na.rm=TRUE))}
#normed <- apply(data,2, z_score_function)
#dataNormed <- data.frame(normed)
#normalization <- function(y) {
#return ((y - min(y)) / (max(y) - min(y)))
#}
# Min-Max Normalization
#data$ram <- (data$ram - min(data$ram)) / (max(data$ram) - min(data$ram))
#data$px_height <- (data$px_height - min(data$px_height)) / (max(data$px_height) - min(data$px_height))
#data$px_width <- (data$px_width - min(data$px_width)) / (max(data$px_width) - min(data$px_width))
#data$battery_power <- (data$battery_power - min(data$battery_power)) / (max(data$battery_power) - min(data$battery_power))
# Declare which variables are numeric and which are categorical/nominal
cols_to_factors <- c('blue','dual_sim','four_g','three_g','touch_screen','wifi')
dataToModel <- data.frame(lapply(data[,cols_to_factors],as.factor))
dataToModel <- cbind(data[,!colnames(data)%in%cols_to_factors],dataToModel)
rm(cols_to_factors)
## Reordering the columns
dataToModel <- dataToModel[,c(1,16,2,17,3,18,4:14,19,20,21,15)]
## Variable to coerce to integer
dataToModel$n_cores <- as.integer(dataToModel$n_cores)
dataToModel$price_range<-as.factor(dataToModel$price_range)
#IDs
dataToModel<-cbind(c(1:nrow(dataToModel)),dataToModel)
colnames(dataToModel)[1]<-"id"
str(dataToModel)
# Model: Neural Networks
library(neuralnet)
library(dplyr)
# Make training and validation data
set.seed(123)
train <- sample(nrow(dataToModel), nrow(dataToModel)*0.7)
test <- seq(nrow(dataToModel))[-train]
train_N <- dataToModel[train,]
test_N <- dataToModel[test,]
# Binarize the categorical output
train_N <- cbind(train_N, train_N$price_range == '0')
train_N <- cbind(train_N, train_N$price_range == '1')
train_N <- cbind(train_N, train_N$price_range == '2')
train_N <- cbind(train_N, train_N$price_range == '3')
names(train_N)[23:26] <- c('level_0', 'level_1', 'level_2','level_3')
# Fit model
nn <- neuralnet(
level_0 + level_1 + level_2+ level_3 ~ ram+px_height+px_width+battery_power,
data=train_N, hidden = 4,lifesign = "full")
print(nn)
plot(nn)
comp <- compute(nn, test_N[,-22])
pred.weights <- comp$net.result
idx <- apply(pred.weights,1, which.max)
idx
pred <- c('0','1','2','3')[idx]
pred
table(pred, test_N$price_range)
accuracy_NN<-sum(pred==test_N$price_range)/nrow(test_N)
accuracy_NN
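One detail worth checking (a small sketch using the objects above): if pred is converted to a factor carrying all four levels, table() keeps the empty rows, so the confusion table always has 4 rows even when some classes are never predicted:
pred_f <- factor(pred, levels = c('0', '1', '2', '3'))
table(pred_f, test_N$price_range)  # rows for all four levels, including those never predicted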

Speeding up linear model fitting on complete pairwise observations in large sparse matrix in R

I have a numeric data.frame df with 134946 rows x 1938 columns.
99.82% of the data are NA.
For each pair of (distinct) columns "P1" and "P2", I need to find which rows have non-NA values for both and then do some operations on those rows (linear model).
I wrote a script that does this, but it seems quite slow.
This post seems to discuss a related task, but I can't immediately see if or how it can be adapted to my case.
Borrowing the example from that post:
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow=nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.9)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
My script is:
tic = proc.time()
out <- do.call(rbind,sapply(1:(N_ps-1), function(i) {
if (i/10 == floor(i/10)) {
cat("\ni = ",i,"\n")
toc = proc.time();
show(toc-tic);
}
do.call(rbind,sapply((i+1):N_ps, function(j) {
w <- which(complete.cases(df[,i],df[,j]))
N <- length(w)
if (N >= 5) {
xw <- df[w,i]
yw <- df[w,j]
if ((diff(range(xw)) != 0) & (diff(range(yw)) != 0)) {
s <- summary(lm(yw~xw))
o <- c(i,j,N,s$adj.r.squared,s$coefficients[2],s$coefficients[4],s$coefficients[8],s$coefficients[1],s$coefficients[3],s$coefficients[7])} else {
o <- c(i,j,N,rep(NA,7))
}
} else {o <- NULL}
return(o)
},simplify=F))
}
,simplify=F))
toc = proc.time();
show(toc-tic);
This takes about 10 minutes on my machine.
You can imagine what happens when I need to handle a much larger (although more sparse) data matrix. I never managed to finish the calculation.
Question: do you think this could be done more efficiently?
The thing is, I don't know which operations take the most time: subsetting df (in which case I would avoid repeating it), appending matrix data (in which case I would fill a flat vector and convert it to a matrix at the end), or something else.
Thanks!
EDIT following up from minem's post
As shown by minem, the speed of this calculation strongly depended on the way linear regression parameters were calculated. Therefore changing that part was the single most important thing to do.
My own further trials showed that: 1) it was essential to use sapply in combination with do.call(rbind, ...), rather than any flat vector, to store the data (I am still not sure why; I might make a separate post about this); 2) on the original matrix I am working on, which is much sparser and has a much larger nrow/ncol ratio than the one in this example, using the information on the x vector available at the start of each i iteration to reduce the y vector at the start of each j iteration increased the speed by several orders of magnitude, even compared with minem's original script, which was already much better than mine above.
I suppose the advantage comes from filtering out many rows a priori, thus avoiding costly xna & yna operations on very long vectors.
The modified script is the following:
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow = nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.90)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
tic = proc.time()
naIds <- lapply(df, function(x) !is.na(x))
dl <- as.list(df)
rl <- sapply(1:(N_ps - 1), function(i) {
if ((i-1)/10 == floor((i-1)/10)) {
cat("\ni = ",i,"\n")
toc = proc.time();
show(toc-tic);
}
x <- dl[[i]]
xna <- which(naIds[[i]])
rl2 <- sapply((i + 1):N_ps, function(j) {
y <- dl[[j]][xna]
yna <- which(naIds[[j]][xna])
w <- xna[yna]
N <- length(w)
if (N >= 5) {
xw <- x[w]
yw <- y[yna]
if ((min(xw) != max(xw)) && (min(yw) != max(yw))) {
# extracts from lm/lm.fit/summary.lm functions
X <- cbind(1L, xw)
m <- .lm.fit(X, yw)
# calculate adj.r.squared
fitted <- yw - m$residuals
rss <- sum(m$residuals^2)
mss <- sum((fitted - mean(fitted))^2)
n <- length(m$residuals)
rdf <- n - m$rank
# rdf <- df.residual
r.squared <- mss/(mss + rss)
adj.r.squared <- 1 - (1 - r.squared) * ((n - 1L)/rdf)
# calculate se & pvals
p1 <- 1L:m$rank
Qr <- m$qr
R <- chol2inv(Qr[p1, p1, drop = FALSE])
resvar <- rss/rdf
se <- sqrt(diag(R) * resvar)
est <- m$coefficients[m$pivot[p1]]
tval <- est/se
pvals <- 2 * pt(abs(tval), rdf, lower.tail = FALSE)
res <- c(m$coefficients[2], se[2], pvals[2],
m$coefficients[1], se[1], pvals[1])
o <- c(i, j, N, adj.r.squared, res)
} else {
o <- c(i,j,N,rep(NA,7))
}
} else {o <- NULL}
return(o)
}, simplify = F)
do.call(rbind, rl2)
}, simplify = F)
out2 <- do.call(rbind, rl)
toc = proc.time();
show(toc - tic)
E.g. try with nr=100000; nc=100.
I should probably mention that I tried using indices, i.e.:
naIds <- lapply(df, function(x) which(!is.na(x)))
and then obviously generating w by intersection:
w <- intersect(xna,yna)
N <- length(w)
This however is slower than the above.
The largest bottleneck is the lm function, because it performs a lot of checks and additional calculations that you do not necessarily need. So I extracted only the needed parts.
I got this to run in +/- 18 seconds.
set.seed(54321)
nr = 1000;
nc = 900;
dat = matrix(runif(nr*nc), nrow = nr)
rownames(dat) = paste(1:nr)
colnames(dat) = paste("time", 1:nc)
dat[sample(nr*nc, nr*nc*0.9)] = NA
df <- as.data.frame(dat)
df_ps <- names(df)
N_ps <- length(df_ps)
tic = proc.time()
naIds <- lapply(df, function(x) !is.na(x)) # outside loop
dl <- as.list(df) # sub-setting list elements is faster than sub-setting columns
rl <- sapply(1:(N_ps - 1), function(i) {
x <- dl[[i]]
xna <- naIds[[i]] # logical vector marking the non-NA entries of column i
rl2 <- sapply((i + 1):N_ps, function(j) {
y <- dl[[j]]
yna <- naIds[[j]]
w <- xna & yna
N <- sum(w)
if (N >= 5) {
xw <- x[w]
yw <- y[w]
if ((min(xw) != max(xw)) && (min(yw) != max(yw))) { # faster
# extracts from lm/lm.fit/summary.lm functions
X <- cbind(1L, xw)
m <- .lm.fit(X, yw)
# calculate adj.r.squared
fitted <- yw - m$residuals
rss <- sum(m$residuals^2)
mss <- sum((fitted - mean(fitted))^2)
n <- length(m$residuals)
rdf <- n - m$rank
# rdf <- df.residual
r.squared <- mss/(mss + rss)
adj.r.squared <- 1 - (1 - r.squared) * ((n - 1L)/rdf)
# calculate se & pvals
p1 <- 1L:m$rank
Qr <- m$qr
R <- chol2inv(Qr[p1, p1, drop = FALSE])
resvar <- rss/rdf
se <- sqrt(diag(R) * resvar)
est <- m$coefficients[m$pivot[p1]]
tval <- est/se
pvals <- 2 * pt(abs(tval), rdf, lower.tail = FALSE)
res <- c(m$coefficients[2], se[2], pvals[2],
m$coefficients[1], se[1], pvals[1])
o <- c(i, j, N, adj.r.squared, res)
} else {
o <- c(i,j,N,rep(NA,7)) # same length as the successful branch
}
} else {o <- NULL}
return(o)
}, simplify = F)
do.call(rbind, rl2)
}, simplify = F)
out2 <- do.call(rbind, rl)
toc = proc.time();
show(toc - tic);
# user system elapsed
# 17.94 0.11 18.44
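As a quick check that the statistics extracted from .lm.fit() match what summary(lm()) reports, one can compare the two on a toy pair of vectors (a small sketch, not part of the timing):
set.seed(1)
xw <- runif(30)
yw <- 2 * xw + rnorm(30)
m <- .lm.fit(cbind(1L, xw), yw)
s <- summary(lm(yw ~ xw))
all.equal(unname(m$coefficients), unname(coef(s)[, "Estimate"]))  # intercept and slope agree
rss <- sum(m$residuals^2)
fitted <- yw - m$residuals
mss <- sum((fitted - mean(fitted))^2)
rdf <- length(yw) - m$rank
all.equal(1 - (1 - mss / (mss + rss)) * ((length(yw) - 1) / rdf), s$adj.r.squared)  # adj. R^2 agrees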

How can I save randomly generated train and test datasets?

I am using a for loop to generate 100 different train and test sets.
What I want to do now is to save these 100 different train and test sets so that I can later look at, for example, iteration 17.
This code shows my program with the for loop and the division into train and test set:
result_df<-matrix(ncol=3,nrow=100)
colnames(result_df)<-c("Acc","Sens","Spec")
for (g in 1:100 )
{
# Divide into Train and test set
smp_size <- floor(0.8 * nrow(mydata1))
train_ind <- sample(seq_len(nrow(mydata1)), size = smp_size)
train <- mydata1[train_ind, ]
test <- mydata1[-train_ind, ]
REST OF MY CODE
# Calculate some statistics
overall <- cm$overall
overall.accuracy <- format(overall['Accuracy'] * 100, nsmall =2, digits = 2)
overall.sensitivity <- format(cm$byClass['Sensitivity']* 100, nsmall =2, digits = 2)
overall.specificity <- format(cm$byClass['Specificity']* 100, nsmall =2, digits = 2)
result_df[g,1] <- overall.accuracy
result_df[g,2] <- overall.sensitivity
result_df[g,3] <- overall.specificity
}
How can I do this?
You could do the following, for example, saving each test and train set as an element of a list:
result_df<-matrix(ncol=3,nrow=100)
colnames(result_df)<-c("Acc","Sens","Spec")
testlist <- list()
trainlist <- list()
for (g in 1:100 )
{
# Divide into Train and test set
smp_size <- floor(0.8 * nrow(mydata1))
train_ind <- sample(seq_len(nrow(mydata1)), size = smp_size)
train <- mydata1[train_ind, ]
test <- mydata1[-train_ind, ]
trainlist[[g]] <- train
testlist[[g]] <- test
}
EDIT
To retrieve the 7th element of these lists you could use trainlist[[7]]
You can save these as CSV files using the following approach:
# format the timestamp so the file name contains no spaces or colons
write.csv(train, file = paste0("train-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".csv"))
write.csv(test, file = paste0("test-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".csv"))
One option could be to save the row indexes of your partitions, rather than saving all the datasets, and then select the rows indexes for the iteration you're interested in.
The caret package has a function called createDataPartition, which will do this for you:
library(caret)
df <- data.frame(col1 = rnorm(100), col2 = rnorm(100))
# create 100 partitions
train.idxs <- createDataPartition(1:nrow(df), times = 100, p = 0.8)
for(i in 1:length(train.idxs)) {
# create train and test sets
idx <- train.idxs[[i]]
train.df <- df[idx, ]
test.df <- df[-idx, ]
# calculate statistics ...
result_df[i,1] <- overall.accuracy
result_df[i,2] <- overall.sensitivity
result_df[i,3] <- overall.specificity
}
# check the datasets for the nth partition
# train set
df[train.idxs[[n]], ]
# test set
df[-train.idxs[[n]], ]
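If you also need to regenerate exactly the same 100 partitions later (rather than just store them), a fixed seed before creating them is enough (a small sketch; any seed value works):
set.seed(42)  # re-running this line and the next recreates the same partitions
train.idxs <- createDataPartition(1:nrow(df), times = 100, p = 0.8)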
Put your code in a function and do a lapply():
result_df <- matrix(ncol=3, nrow=100)
colnames(result_df)<-c("Acc", "Sens", "Spec")
SIMg <- function(g) {
# Divide into Train and test set
smp_size <- floor(0.8 * nrow(mydata1))
train_ind <- sample(seq_len(nrow(mydata1)), size = smp_size)
train <- mydata1[train_ind, ]
test <- mydata1[-train_ind, ]
REST OF THE CODE
return(list(train=train, test=test, ...))
}
L <- lapply(1:100, SIMg)
The resulting list L has 100 elements; each element is a list containing the two data frames and your results for one simulation run.
To get separate lists trainlist and testlist you can do:
trainlist <- lapply(L, '[[', "train")
testlist <- lapply(L, '[[', "test")
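With either approach, iteration 17 is then simply trainlist[[17]] and testlist[[17]] (or L[[17]]$train and L[[17]]$test directly).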
