Error in x[ind, ] - centerI : non-conformable arrays

I have a time series data set that I would like to cluster using different clustering techniques and then evaluate each method. I tried R code based on the clusterSim package, but I got the error
Error in x[ind, ] - centerI : non-conformable arrays
and I have no idea why.
Here is my code:
library(dtwclust)
library(TSclust)
library(dplyr)
library(clusterSim)
data("interest.rates")
rate.change <- diff(log(interest.rates), 1)
md <- diss(rate.change, "AR.MAH")$p_value
min_nc=2
max_nc=8
res <- array(0, c(max_nc-min_nc+1, 2))
res[,1] <- min_nc:max_nc
clusters <- NULL
for (nc in min_nc:max_nc)
{
hc <- hclust(md, method="average")
cl2 <- cutree(hc, k=nc)
res[nc-min_nc+1, 2] <- DB <- index.DB(rate.change, cl2, md,centrotypes="medoids")$DB
clusters <- rbind(clusters, cl2)
}
The original code is as follows:
# Example 3
library(clusterSim)
data(data_ratio)
md <- dist(data_ratio, method="euclidean")
# nc - number_of_clusters
min_nc=2
max_nc=8
res <- array(0, c(max_nc-min_nc+1, 2))
res[,1] <- min_nc:max_nc
clusters <- NULL
for (nc in min_nc:max_nc)
{
hc <- hclust(md, method="complete")
cl2 <- cutree(hc, k=nc)
res[nc-min_nc+1, 2] <- DB <- index.DB(data_ratio, cl2, centrotypes="centroids")$DB
clusters <- rbind(clusters, cl2)
}
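One hedged suggestion (the poster's exact data layout isn't shown, so this is a sketch rather than a confirmed fix): TSclust's diss() treats the individual interest-rate series as the objects, so md is a dissimilarity over the series, while index.DB() expects one object per row of its x argument. In rate.change the rows are time points, not series, which is exactly the kind of mismatch that makes x[ind, ] - centerI non-conformable. Transposing so the rows of x line up with the objects of md may resolve it:
# Hedged sketch: make the rows of x the same objects that md was computed over
res[nc-min_nc+1, 2] <- DB <- index.DB(t(rate.change), cl2, md, centrotypes="medoids")$DB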

Related

Neural Networks in RStudio

I am trying to predict the variable price_range, which has 4 levels (0,1,2,3), with a neural network.
But the table with the results only has 2 rows instead of 4; I don't know why the table is not 4x4.
idx only contains ones and fours, and pred only contains 0 and 3.
Sometimes the table has 3 rows, but never 4, and most of the time it has only 2.
Please help. Thank you!
#install.packages("rJava")
library(rJava)
#install.packages("xlsx")
library(xlsx)
#Read the data set file
setwd("C:/Users/Utilizador/Desktop/AE")
dataread <- read.xlsx("datasetDeleteOutliers.xlsx",1)
# alternative data sets (assign one of these instead, if available)
# dataread <- datasetDeleteOutliers
# dataread <- datasetCorrelation
# dataread <- datasetMedian
# dataread <- datasetNearestNeighbor
#
data <- data.frame(dataread)
str(data)
data$clock_speed=as.numeric(data$clock_speed)
data$m_dep=as.numeric(data$m_dep)
str(data)
# Search for binary variables
is_binary <- function(x) length(unique(x)) == 2
sapply(data, is_binary)
summary(data) # view a summary of the data's characteristics
#data normalization
#z_score_function <- function(y) {(y - mean(y, na.rm=TRUE))/(sd(y,na.rm=TRUE))}
#normed <- apply(data,2, z_score_function)
#dataNormed <- data.frame(normed)
#normalization <- function(y) {
#return ((y - min(y)) / (max(y) - min(y)))
#}
# Min-Max Normalization
#data$ram <- (data$ram - min(data$ram)) / (max(data$ram) - min(data$ram))
#data$px_height <- (data$px_height - min(data$px_height)) / (max(data$px_height) - min(data$px_height))
#data$px_width <- (data$px_width - min(data$px_width)) / (max(data$px_width) - min(data$px_width))
#data$battery_power <- (data$battery_power - min(data$battery_power)) / (max(data$battery_power) - min(data$battery_power))
# Declare to R which variables are numeric and which are categorical/nominal
cols_to_factors <- c('blue','dual_sim','four_g','three_g','touch_screen','wifi')
dataToModel <- data.frame(lapply(data[,cols_to_factors],as.factor))
dataToModel <- cbind(data[,!colnames(data)%in%cols_to_factors],dataToModel)
rm(cols_to_factors)
## Reordering the columns
dataToModel <- dataToModel[,c(1,16,2,17,3,18,4:14,19,20,21,15)]
## Variable to coerce to integer
dataToModel$n_cores <- as.integer(dataToModel$n_cores)
dataToModel$price_range<-as.factor(dataToModel$price_range)
#IDs
dataToModel<-cbind(c(1:nrow(dataToModel)),dataToModel)
colnames(dataToModel)[1]<-"id"
str(dataToModel)
#Model: neural network
library(neuralnet)
library(dplyr)
# Make training and validation data
set.seed(123)
train <- sample(nrow(dataToModel), nrow(dataToModel)*0.7)
test <- seq(nrow(dataToModel))[-train]
train_N <- dataToModel[train,]
test_N <- dataToModel[test,]
# Binarize the categorical output
train_N <- cbind(train_N, train_N$price_range == '0')
train_N <- cbind(train_N, train_N$price_range == '1')
train_N <- cbind(train_N, train_N$price_range == '2')
train_N <- cbind(train_N, train_N$price_range == '3')
names(train_N)[23:26] <- c('level_0', 'level_1', 'level_2','level_3')
# Fit model
nn <- neuralnet(
level_0 + level_1 + level_2+ level_3 ~ ram+px_height+px_width+battery_power,
data=train_N, hidden = 4,lifesign = "full")
print(nn)
plot(nn)
comp <- compute(nn, test_N[,-22])
pred.weights <- comp$net.result
idx <- apply(pred.weights,1, which.max)
idx
pred <- c('0','1','2','3')[idx]
pred
table(pred, test_N$price_range)
accuracy_NN<-sum(pred==test_N$price_range)/nrow(test_N)
accuracy_NN
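Two hedged suggestions, since the data set itself isn't available to verify. First, the confusion table can be forced to 4x4 by giving pred explicit factor levels, so classes that are never predicted still get a row of zeros. Second, the min-max normalization commented out above is likely needed: with raw inputs on very different scales (ram vs. battery_power), the hidden units tend to saturate and the net only separates the extreme classes 0 and 3, which matches idx containing only ones and fours. The normalize01 helper below is just the commented-out min-max idea restated:
# Force a 4x4 table even when some levels are never predicted
table(factor(pred, levels = c('0','1','2','3')),
      factor(test_N$price_range, levels = c('0','1','2','3')))
# Hedged: rescale the four predictors to [0,1] before fitting
# (then redo the train/test split and refit)
normalize01 <- function(y) (y - min(y)) / (max(y) - min(y))
for (v in c("ram", "px_height", "px_width", "battery_power")) {
  dataToModel[[v]] <- normalize01(dataToModel[[v]])
}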

Requires numeric/complex matrix/vector argument

So I have a large data set that I have imported and split up. I've made sure to attach everything, and I've tried to run code that determines the number of breakpoints using AIC.
rm(list=ls())
library(Matching)
library(segmented)
dinosaurs=read.csv("C:/Users/user/Desktop/NEW PLOTS FOR DINOS/centrum_input_fin.csv")
attach(dinosaurs)
names(dinosaurs)
dino_names <- names(dinosaurs)
#NEED TO EXPORT FILES (EXPORT THE ALL_DATA_PLUS_SORTED OUT)
all_data_plus_sorted<-NULL
for(j in 1:length(dino_names))
{
with_gaps<-eval(parse(text = dino_names[j]))
gaps <- which(is.na(with_gaps))
non_gaps <-which(1:length(with_gaps) %in%gaps==FALSE)
sorted_without_gaps <- sort(with_gaps[!is.na(with_gaps)],decreasing=TRUE)
ordered_with_gaps<-rep(NA,length(with_gaps))
for(k in 1:length(non_gaps))
{
ordered_with_gaps[non_gaps[k]] <- sorted_without_gaps[k]
}
to_export<-cbind(with_gaps,ordered_with_gaps)
colnames(to_export)<-c(paste(dino_names[j],"_actual_with_gaps",sep=""),paste(dino_names[j],"_ordered_with_gaps",sep=""))
all_data_plus_sorted<- cbind(all_data_plus_sorted,to_export)
}
all_data_plus_sorted
attach(as.data.frame(all_data_plus_sorted))
print(dinosaurs)
detach(as.data.frame(all_data_plus_sorted))
detach(dinosaurs)
#split species
Dyoplosaurus_acutosquameus_ROM734 <- Dyoplosaurus_acutosquameus_ROM734[!is.na(Dyoplosaurus_acutosquameus_ROM734)]
Staurikosaurus_pricei <- Staurikosaurus_pricei[!is.na(Staurikosaurus_pricei)]
Opistocoelocaudia_skarzynskii <- Opistocoelocaudia_skarzynskii[!is.na(Opistocoelocaudia_skarzynskii)]
Stegosaurus_stenops._NHMUKPVR36730 <- Stegosaurus_stenops._NHMUKPVR36730[!is.na(Stegosaurus_stenops._NHMUKPVR36730)]
Giraffatitan_brancai <- Giraffatitan_brancai[!is.na(Giraffatitan_brancai)]
Camptosaurus <- Camptosaurus[!is.na(Camptosaurus)]
Camptosaurus_prestwichii <- Camptosaurus_prestwichii[!is.na(Camptosaurus_prestwichii)]
A_greppini <- A_greppini[!is.na(A_greppini)]
Astrophocaudia_slaughteri_SMU61732 <- Astrophocaudia_slaughteri_SMU61732[!is.na(Astrophocaudia_slaughteri_SMU61732)]
Tastavinsaurus_sanzi_gen_MPZ999 <- Tastavinsaurus_sanzi_gen_MPZ999[!is.na(Tastavinsaurus_sanzi_gen_MPZ999)]
MOZ_Pv1221 <- MOZ_Pv1221[!is.na(MOZ_Pv1221)]
Mamenchisaurus <- Mamenchisaurus[!is.na(Mamenchisaurus)]
Bromtosaurus_CMNo3018 <- Bromtosaurus_CMNo3018[!is.na(Bromtosaurus_CMNo3018)]
Lufengosaurus_Hueni <- Lufengosaurus_Hueni[!is.na(Lufengosaurus_Hueni)]
Mamenchisaurus_hochuanensi <- Mamenchisaurus_hochuanensi[!is.na(Mamenchisaurus_hochuanensi)]
Spinosaurus_FSACKK11888 <- Spinosaurus_FSACKK11888[!is.na(Spinosaurus_FSACKK11888)]
Buitreraptor_MPCNPV370 <- Buitreraptor_MPCNPV370[!is.na(Buitreraptor_MPCNPV370)]
Buitreraptor_MPCA245 <- Buitreraptor_MPCA245[!is.na(Buitreraptor_MPCA245)]
Huabeisaurus_allocotus_HBV20001 <- Huabeisaurus_allocotus_HBV20001[!is.na(Huabeisaurus_allocotus_HBV20001)]
Tethyshadros_insularis_SC57021 <- Tethyshadros_insularis_SC57021[!is.na(Tethyshadros_insularis_SC57021)]
Compsognathus_longipes_CNJ79 <- Compsognathus_longipes_CNJ79[!is.na(Compsognathus_longipes_CNJ79)]
Archaeopteryx12 <- Archaeopteryx12[!is.na(Archaeopteryx12)]
Sinosauropteryx_NIGP127586 <- Sinosauropteryx_NIGP127586[!is.na(Sinosauropteryx_NIGP127586)]
Sinosauropteryx_NIGP_127587 <- Sinosauropteryx_NIGP_127587[!is.na(Sinosauropteryx_NIGP_127587)]
Tetonosaurus_tilletti_AMNH3040 <- Tetonosaurus_tilletti_AMNH3040[!is.na(Tetonosaurus_tilletti_AMNH3040)]
Bambiraptor_feinbergi_FIP001 <- Bambiraptor_feinbergi_FIP001[!is.na(Bambiraptor_feinbergi_FIP001)]
Seimosaurus.halli_NMMNH3690 <- Seimosaurus.halli_NMMNH3690[!is.na(Seimosaurus.halli_NMMNH3690)]
Diluvicursor_pickeringi_NMVP221080 <- Diluvicursor_pickeringi_NMVP221080[!is.na(Diluvicursor_pickeringi_NMVP221080)]
Zhejiungosuurus_lishuiensis_ZMNHM8718 <- Zhejiungosuurus_lishuiensis_ZMNHM8718[!is.na(Zhejiungosuurus_lishuiensis_ZMNHM8718)]
Tianyulong_confuciusi_STMN.263 <- Tianyulong_confuciusi_STMN.263[!is.na(Tianyulong_confuciusi_STMN.263)]
Lusotitan_atalaiensis <- Lusotitan_atalaiensis[!is.na(Lusotitan_atalaiensis)]
Nemegtonykus_citus_MPCD100203 <- Nemegtonykus_citus_MPCD100203[!is.na(Nemegtonykus_citus_MPCD100203)]
Elaphrosaurus_bambergi_MBR4960 <- Elaphrosaurus_bambergi_MBR4960[!is.na(Elaphrosaurus_bambergi_MBR4960)]
Nomingia_gobiensis_GIN100119 <- Nomingia_gobiensis_GIN100119[!is.na(Nomingia_gobiensis_GIN100119)]
Nomingia_gobiensis_MPCD100119 <- Nomingia_gobiensis_MPCD100119[!is.na(Nomingia_gobiensis_MPCD100119)]
Chirostenotes_pergracilis <- Chirostenotes_pergracilis[!is.na(Chirostenotes_pergracilis)]
Seismosaurus_hallorum_NMMNHP3690 <- Seismosaurus_hallorum_NMMNHP3690[!is.na(Seismosaurus_hallorum_NMMNHP3690)]
Heterodontosaurus_tucki_SAMPKK1332 <- Heterodontosaurus_tucki_SAMPKK1332[!is.na(Heterodontosaurus_tucki_SAMPKK1332)]
Jianianhualong_tengi_DLXH1218 <- Jianianhualong_tengi_DLXH1218[!is.na(Jianianhualong_tengi_DLXH1218)]
Yinlong_downsi_IVPPV18685 <- Yinlong_downsi_IVPPV18685[!is.na(Yinlong_downsi_IVPPV18685)]
Neimongosaurus_yangi_LHV0001 <- Neimongosaurus_yangi_LHV0001[!is.na(Neimongosaurus_yangi_LHV0001)]
Magnapaulia_laticaudus_LACM17715 <- Magnapaulia_laticaudus_LACM17715[!is.na(Magnapaulia_laticaudus_LACM17715)]
Ouranosaurus_nigeriensis <- Ouranosaurus_nigeriensis[!is.na(Ouranosaurus_nigeriensis)]
Dreadnoughtus_schrani_MPMPV1156 <- Dreadnoughtus_schrani_MPMPV1156[!is.na(Dreadnoughtus_schrani_MPMPV1156)]
Pectodens_zhenyuensis_IVPPV18578 <- Pectodens_zhenyuensis_IVPPV18578[!is.na(Pectodens_zhenyuensis_IVPPV18578)]
Dilophosaurus_wetherilli <- Dilophosaurus_wetherilli[!is.na(Dilophosaurus_wetherilli)]
Gobihadros_mongoliensis_MPCD100746 <- Gobihadros_mongoliensis_MPCD100746[!is.na(Gobihadros_mongoliensis_MPCD100746)]
Gobihadros_mongoliensis_MPCD100755 <- Gobihadros_mongoliensis_MPCD100755[!is.na(Gobihadros_mongoliensis_MPCD100755)]
Auroraceratops_rugosus_GJ07913 <- Auroraceratops_rugosus_GJ07913[!is.na(Auroraceratops_rugosus_GJ07913)]
Patagotitan_mayorum_MPEFPV <- Patagotitan_mayorum_MPEFPV[!is.na(Patagotitan_mayorum_MPEFPV)]
Eoraptor_lunensi_PVSJ512 <- Eoraptor_lunensi_PVSJ512[!is.na(Eoraptor_lunensi_PVSJ512)]
Corythosaurus_casuarius <- Corythosaurus_casuarius[!is.na(Corythosaurus_casuarius)]
Caihong._Juji_PMoLB00175 <- Caihong._Juji_PMoLB00175[!is.na(Caihong._Juji_PMoLB00175)]
Eosinopteryx_brevipenna_YFGPT5197 <- Eosinopteryx_brevipenna_YFGPT5197[!is.na(Eosinopteryx_brevipenna_YFGPT5197)]
Rahonavis_ostromi_UA8656 <- Rahonavis_ostromi_UA8656[!is.na(Rahonavis_ostromi_UA8656)]
Changyuraptor_yangi_HGB016 <- Changyuraptor_yangi_HGB016[!is.na(Changyuraptor_yangi_HGB016)]
Herrerasaurus_ischigualastensis_PVL2566 <- Herrerasaurus_ischigualastensis_PVL2566[!is.na(Herrerasaurus_ischigualastensis_PVL2566)]
Herrerasaurus_ischigualastensis_UNSJ53 <- Herrerasaurus_ischigualastensis_UNSJ53[!is.na(Herrerasaurus_ischigualastensis_UNSJ53)]
Ischioceratops_zhuchengensis <- Ischioceratops_zhuchengensis[!is.na(Ischioceratops_zhuchengensis)]
Koreaceratops_hwaseongensis <- Koreaceratops_hwaseongensis[!is.na(Koreaceratops_hwaseongensis)]
# CHOOSE SAMPLE TO ANALYSE
#_________________________________________________________________________________________________
# choose sample
name_to_test <- "Koreaceratops_hwaseongensis"
y_val <- eval(parse(text = paste(name_to_test,"_actual_with_gaps",sep="")))
x_val<-1:length(y_val)
# USE AIC TO DECIDE HOW MANY BREAKS TO USE
#_________________________________________________________________________________________________
# extract AIC for models with 1-3 breakpoints
my_max_it=10
all_mods<-NULL
for(h in 1:4)
{
mod1<-segmented(lm(y_val~x_val),seg.Z=~x_val,psi=NA,control=seg.control(K=h,quant=TRUE,it.max=my_max_it),model=TRUE,nboot=50)
all_mods<-rbind(all_mods,c(h,extractAIC(mod1)[2]))
}
all_mods
my_K<-subset(all_mods,all_mods[,2]==min(all_mods[,2]))[1]
When I run the last section of the code I get the error:
Error in crossprod(x, y) :
requires numeric/complex matrix/vector arguments
I'm not sure why, because I have put the data in a data frame. Is it because I'm importing the file incorrectly? I'm not sure how to fix it.
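One hedged thing to check (the csv isn't available to confirm): lm() hands its response to routines like crossprod(), which require numeric input, so this error usually means y_val arrived as character or factor, for example because the csv column contains a stray non-numeric entry. Checking and coercing before the segmented() loop would confirm:
# Hedged check: if the column was read as character/factor, coerce it
class(y_val)                             # should be "numeric" or "integer"
y_val <- as.numeric(as.character(y_val))
summary(lm(y_val ~ x_val))               # should now fit without the crossprod error
# Also hedged: indexing by name avoids the attach()/eval(parse()) pattern
y_val <- as.data.frame(all_data_plus_sorted)[[paste0(name_to_test, "_actual_with_gaps")]]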

R vector memory exhausted for calculating tensor products

I am calculating the tensor products of 60,000 pairs of 28x28 matrices in R 3.5.2 (via RStudio), and the console shows me "Error: vector memory exhausted (limit reached?)". I don't think my MacBook Pro (16 GB RAM) has such low capacity. I tried the mclapply method for parallel computing, but it still doesn't work. Can anyone offer some insight? Thanks a lot!
If necessary, here is my code; I cannot run the last line.
#install.packages("keras")
#install_keras()   # one-time setup; or install_keras(method = "conda")
library(keras)
mnist <- dataset_mnist()
str(mnist)
trainx <- mnist$train$x
trainy <- mnist$train$y
testx <- mnist$test$x
testy <- mnist$test$y
trainxr <- trainx
trainxg <- trainx
trainxb <- trainx
testxr <- testx
testxg <- testx
testxb <- testx
#training data
for (i in 1:60000) {
randomr <- sample (0:255, 1)
randomg <- sample (0:255, 1)
randomb <- sample (0:255, 1)
trainxr[i,,] <- (randomr/255)*(trainx[i,,]/255)
trainxg[i,,] <- (randomg/255)*(trainx[i,,]/255)
trainxb[i,,] <- (randomb/255)*(trainx[i,,]/255)
}
#testing data
for (j in 1:10000) {
randomr <- sample (0:255, 1)
randomg <- sample (0:255, 1)
randomb <- sample (0:255, 1)
testxr[j,,] <- (randomr/255)*(testx[j,,]/255)
testxg[j,,] <- (randomg/255)*(testx[j,,]/255)
testxb[j,,] <- (randomb/255)*(testx[j,,]/255)
}
#for training
for (k in 1:60000) {
randomminus <- sample (0:255, 1)
matrixminus <- matrix(randomminus/255, nrow = 28, ncol = 28)
trainxr[k,,] <- trainxr[k,,] - matrixminus
trainxr[k,,] <- abs(trainxr[k,,])
trainxg[k,,] <- trainxg[k,,] - matrixminus
trainxg[k,,] <- abs(trainxg[k,,])
trainxb[k,,] <- trainxb[k,,] - matrixminus
trainxb[k,,] <- abs(trainxb[k,,])
}
#for testing
for (l in 1:10000) {
randomminus <- sample (0:255, 1)
matrixminus <- matrix(randomminus/255, nrow = 28, ncol = 28)
testxr[l,,] <- testxr[l,,] - matrixminus   # was trainxr: the testing loop should modify the test arrays
testxr[l,,] <- abs(testxr[l,,])
testxg[l,,] <- testxg[l,,] - matrixminus
testxg[l,,] <- abs(testxg[l,,])
testxb[l,,] <- testxb[l,,] - matrixminus
testxb[l,,] <- abs(testxb[l,,])
}
#tensor product
stepone <- matrix(1:1, nrow=21952, ncol=28)
steptwo <- matrix(1:1, nrow=28, ncol=28)
trainxtensor_a <- trainxr %x% trainxg
I'm guessing you intend to collapse the 2nd and 3rd dimensions in that tensor product. Note that %x% forms the full Kronecker product, whose length is the product of the lengths of its two arguments (here about (60000*28*28)^2, roughly 2x10^15 elements), which is why the vector memory is exhausted. Perhaps you want something like this:
library(tensor)
trainxtensor_a <- tensor(trainxr, trainxg, c(2,3), c(2,3))
You should try it on a smaller subset first, though, to check that it does what you expect:
trainxtensor_a <- tensor(trainxr[1:5,,], trainxg[1:5,,], c(2,3), c(2,3))
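A quick sanity check on the subset (hedged: assuming the intent is one value per pair of images): collapsing dimensions 2 and 3 of both arrays leaves just the two pair indices, so the result here should be 5x5, and on the full data it would be 60000x60000, about 29 GB of doubles, so subsetting or chunking is still advisable on a 16 GB machine.
dim(trainxtensor_a)   # expect c(5, 5) for the 5-image subset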

kmeans results as initial parameters in Mclust

I am fitting Gaussian mixture models. I have run kmeans on the data set and I want to use the means, variances, and sizes as the initial parameters for the EM algorithm in R. I found that parameters is a list of 3 components, and I tried to build the same thing, but it gives me the following error:
Error in array(x, c(length(x), 1L), if (!is.null(names(x))) list(names(x), :
'data' must be of a vector type, was 'NULL'
My code
l <- kmeans(iris[,-5],centers=3)
pi <- l$size/length(iris[,1])
my <- t(l$centers)
sig <- vector("list", 3)
new <- as.data.frame(cbind(iris[,-5],l$cluster))
for (i in 1:3) {
subdata<-subset(new[,1:4],new[,5]==i);
sig[[i]]<-cov(subdata)
}
par <- vector("list",3)
par[[1]] <- pi; par[[2]] <- my; par[[3]] <- sig
kk <- em(modelName = msEst$modelName, data = iris[,-5],parameters = par)
Can someone please tell me how I should supply the kmeans results as initial parameters?
Following is a quick example of what you seem to be after. The main thing you have to do is get the parameters argument into the correct form. The tricky bit is the variance list; the mclustVariance function helps with this.
library(mclust)
g <- 3                                   # number of mixture components
dat <- iris[, -5]
p <- ncol(dat)
n <- nrow(dat)
k_fit <- kmeans(dat, centers = g)
par <- list()
par$pro <- k_fit$size / n                # mixing proportions
par$mean <- t(k_fit$centers)             # one column of means per component
sigma <- array(NA, c(p, p, g))           # one covariance matrix per component
new <- as.data.frame(cbind(dat, k_fit$cluster))
for (i in 1:g) {
subdata <- subset(new[, 1:p], new[, (p+1)] == i)
sigma[,, i] <- cov(subdata)
}
variance <- mclustVariance("EEE", d = p, G = g)   # template list for the variance slot
par$variance <- variance
par$variance$sigma <- sigma
kk <- em(modelName = "EEE", data = dat, parameters = par)
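A quick check of the fitted object (hedged: these component names follow mclust's documented return value for em()):
kk$loglik           # log-likelihood after the EM pass
head(kk$z)          # posterior membership probabilities
kk$parameters$pro   # updated mixing proportions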

Using RStudio, when I click on Run it executes properly, but Source gives an error message

I have the following code, which optimizes and is supposed to export the results to a csv. All works well except the last line of code:
write.csv(solutionsMatrix, file="SLATE/DK.A1.CSV")
When I click on Run it executes correctly, but when I click on Source it gives me an error message:
Error in source("~/.active-rstudio-document", echo = TRUE) :
~/.active-rstudio-document:76:34: unexpected symbol
75:
76: write.csv(solutionsMatrix, file="SLATE
with an up arrow pointing at the S in SLATE.
Why is it doing this? Thanks for looking.
setwd("C:/DFS/NFL/R DATA")
library('lpSolve')
data <- read.csv("DRAFTKINGS.CSV")
#Convert salary to numeric
data$Salary <- as.numeric(gsub(",", "", data$Salary))
data$MAX.TEAM <- as.numeric(gsub(",", "", data$MaxTeam))
#Add binary values for positions 'Constraint Vectors'
data <- cbind(data, X=1)
data <- cbind(data, QB=ifelse(data$Pos=="QB",1,0))
data <- cbind(data, RB=ifelse(data$Pos=="RB", 1,0))
data <- cbind(data, WR=ifelse(data$Pos=="WR", 1,0))
data <- cbind(data, TE=ifelse(data$Pos=="TE", 1,0))
data <- cbind(data, DEF=ifelse(data$Pos=="DEF", 1,0))
#Objective Function. sum of proj pts
f.obj <- data$DKA1
#Constraints
num_X <- 9
num_QB <- 1
min_RB <- 2
min_WR <- 3
min_TE <- 1
num_DEF <- 1
max_RB <- 3
max_WR <- 4
max_TE <- 2
max_team_cost <- 50000
max_player_from_a_team <- data[2, 5]
#Constraints for max players from team
clubs <- sort(unique(data$Team))
team_constraint_vector <- c()
team_constraint_dir <- c()
team_constraint_rhs <- c()
for(i in 1:length(clubs)){
temp <- data$Team==as.character(clubs[i])
temp[temp==T] <- 1
temp[temp==F] <- 0
team_constraint_vector <- c(team_constraint_vector, temp)
team_constraint_dir <- c(team_constraint_dir, "<=")
team_constraint_rhs <- c(team_constraint_rhs, max_player_from_a_team)
}
solutions <- list()
solutionsMatrix <- matrix(, nrow=0, ncol=21)
for(i in 1:1){
f.con <- matrix (c(data$X,data$QB , data$RB ,data$RB ,data$WR, data$WR,data$TE, data$TE,data$DEF , data$Salary, team_constraint_vector), nrow=(10+length(clubs)), byrow=TRUE)
f.dir <- c("=","=", ">=","<=",">=","<=",">=","<=", "=", "<=", team_constraint_dir)
f.rhs <- c(num_X,num_QB,min_RB,max_RB,min_WR, max_WR,min_TE, max_TE, num_DEF, max_team_cost, team_constraint_rhs)
x <- lp ("max", f.obj, f.con, f.dir, f.rhs, all.bin=TRUE)
x
solutions[[i]] <- data[which(x$solution==1),]
solutionsMatrix <- rbind(solutionsMatrix, c("DKA1, sum(solutions[[i]]$DKA1), sum(solutions[[i]]$Salary), toString(solutions[[i]]$Name[4]),toString(solutions[[i]]$Pos[4]), toString(solutions[[i]]$Name[5]),toString(solutions[[i]]$Pos[5]), toString(solutions[[i]]$Name[8]),toString(solutions[[i]]$Pos[8]), toString(solutions[[i]]$Name[9]),toString(solutions[[i]]$Pos[9]), toString(solutions[[i]]$Name[6]),toString(solutions[[i]]$Pos[6]), toString(solutions[[i]]$Name[7]), toString(solutions[[i]]$Pos[7]),toString(solutions[[i]]$Name[2]),toString(solutions[[i]]$Pos[2]), toString(solutions[[i]]$Name[3]),toString(solutions[[i]]$Pos[3]), toString(solutions[[i]]$Name[1]),toString(solutions[[i]]$Pos[1])))
}
solutions[[1]]
#Solutions Matrix Optimization
solutionsMatrix
write.csv(solutionsMatrix, file="SLATE/DK.A1.CSV")
###################################################################
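What is likely going on (hedged, but it matches the error message exactly): in the solutionsMatrix line inside the loop, the string "DKA1 is opened and never closed, so when source() parses the whole file the string runs on until the next double quote, which is the one in file="SLATE/DK.A1.CSV" on line 76; the parser then sees SLATE as an unexpected symbol at column 34. Running pieces interactively can mask an unterminated string, but Source always parses the entire file. Closing the quote should fix it:
# Close the quote on the label: "DKA1", not "DKA1
solutionsMatrix <- rbind(solutionsMatrix,
  c("DKA1", sum(solutions[[i]]$DKA1), sum(solutions[[i]]$Salary),
    # ...the remaining toString(Name/Pos) columns exactly as before...
    toString(solutions[[i]]$Name[1]), toString(solutions[[i]]$Pos[1])))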
