My dataset is:
x = data.frame(v1 = c(97, 97, 85, 84, 90, 80, 81, 90, 80, 70, 90, 90, 90, 95, 88, 99),
               v2 = c(99, 91, 91, 83, 99, 95, 74, 88, 82, 80, 96, 87, 92, 96, 88, 95),
               v3 = c(89, 93, 87, 80, 96, 96, 75, 90, 78, 86, 92, 88, 80, 88, 98, 98),
               v4 = c(89, 97, 91, 86, 95, 95, 89, 88, 75, 82, 99, 92, 95, 92, 90, 98),
               v5 = c(99, 90, 93, 91, 90, 90, 77, 92, 85, 76, 90, 96, 90, 90, 90, 92))
> x
v1 v2 v3 v4 v5
1 97 99 89 89 99
2 97 91 93 97 90
3 85 91 87 91 93
4 84 83 80 86 91
5 90 99 96 95 90
6 80 95 96 95 90
7 81 74 75 89 77
8 90 88 90 88 92
9 80 82 78 75 85
10 70 80 86 82 76
11 90 96 92 99 90
12 90 87 88 92 96
13 90 92 80 95 90
14 95 96 88 92 90
15 88 88 98 90 90
16 99 95 98 98 92
I used the rpart package to fit a decision tree as follows:
# Classification Tree with rpart
library(rpart)
fit <- rpart(v5 ~ v1+v2+v3+v4,
method="class", data=x)
printcp(fit) # display the results
Classification tree:
rpart(formula = v5 ~ v1 + v2 + v3 + v4, data = x, method = "class")
Variables actually used in tree construction:
character(0)
Root node error: 9/16 = 0.5625
n= 16
CP nsplit rel error xerror xstd
1 0.01 0 1 0 0
> summary(fit) # detailed summary of splits
Call:
rpart(formula = v5 ~ v1 + v2 + v3 + v4, data = x, method = "class")
n= 16
CP nsplit rel error xerror xstd
1 0.01 0 1 0 0
Node number 1: 16 observations
predicted class=90 expected loss=0.5625 P(node) =1
class counts: 1 1 1 7 1 2 1 1 1
probabilities: 0.062 0.062 0.062 0.438 0.062 0.125 0.062 0.062 0.062
# plot tree
plot(fit, uniform=TRUE,
     main="Classification Tree")
Error in plot.rpart(fit, uniform = TRUE, main = "Classification Tree ") :
fit is not a tree, just a root
text(fit, use.n=TRUE, all=TRUE, cex=.8)
Error in text.rpart(fit, use.n = TRUE, all = TRUE, cex = 0.8) :
fit is not a tree, just a root
What did I do wrong when applying rpart? Why does it give an error when plotting the tree, and how do I fix this error:
fit is not a tree, just a root
You use method="class" if you are building a classification tree and method="anova" if you are building a regression tree. It looks like you have a continuous response, so you should be building a regression tree (i.e. method="anova").
You are also using rpart's default control parameters. With your data set, rpart cannot adhere to these defaults and still create a tree (i.e. perform any branch splitting):
rpart.control(minsplit = 20, minbucket = round(minsplit/3), cp = 0.01,
maxcompete = 4, maxsurrogate = 5, usesurrogate = 2, xval = 10,
surrogatestyle = 0, maxdepth = 30, ...)
Adjust the control parameters according to the data set.
e.g.:
t <- rpart(formula = v5 ~ v1 + v2 + v3 + v4, data = x, method = "anova",
           control = rpart.control(minsplit = 1, minbucket = 1, cp = 0))
But be aware this could create an overfitted decision tree.
I ran the following code with your x data frame and it produced a tree:
library(rpart)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
fit <- rpart(v5 ~ v1+v2+v3+v4,
method="anova",
data=x,
control = rpart.control(minsplit = 6, cp = 0.01))
fancyRpartPlot(fit) # from the rattle package
Note that your method should be anova, since v5 is a continuous variable, and that you have to override the control parameters with control = rpart.control(...) to adjust the depth of the tree.
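As a possible follow-up (a sketch, not part of the original answer): once minsplit and cp have been loosened and the tree grows deep, you can inspect rpart's cross-validated complexity table and prune back to limit the overfitting mentioned above. The cp value below is only illustrative:

printcp(fit)                        # cross-validated error for each cp value
fit_pruned <- prune(fit, cp = 0.05) # pick a cp from the printcp() table
fancyRpartPlot(fit_pruned)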
Since the last version, the mlr3tuning package supports (custom) instantiated resampling in the AutoTuner class: https://github.com/mlr-org/mlr3tuning/releases/tag/v0.17.2
I have tried to construct rolling-window CVs with custom resampling as in the following link: https://towardsdatascience.com/time-series-nested-cross-validation-76adba623eb9 (figure 1).
I want to tune hyperparameters on, let's say, row ids 1:1000 (for example 1:800 train and 801:1000 test), and then evaluate the model on a test set, say 1001:1100.
Here is my attempt:
library(mlr3)
library(mlr3tuning)
library(mlr3pipelines)
library(mlr3learners)
# task
task = tsk("iris")
task_ = task$clone()
data_ = task_$data()
data_ = cbind(data_, monthid = c(rep(1, 30), rep(2, 30), rep(3, 30), rep(4, 30), rep(5, 30)))
task = as_task_classif(data_, target = "Species")
# inner custom rolling window resampling
custom = rsmp("custom")
task_ <- task$clone()
task_$set_col_roles("monthid", "group")
groups = task_$groups
rm(task_)
groups_v <- groups[, unique(group)]
train_length <- 2
test_length <- 1
train_groups <- lapply(0:(length(groups_v)-(train_length+1)), function(x) x + (1:train_length))
test_groups <- lapply(train_groups, function(x) tail(x, 1) + test_length)
train_sets <- lapply(train_groups, function(x) groups[group %in% groups_v[x], row_id])
test_sets <- lapply(test_groups, function(x) groups[group %in% groups_v[x], row_id])
custom$instantiate(task, train_sets, test_sets)
# outer custom rolling window resampling
customo = rsmp("custom")
task_ <- task$clone()
task_$set_col_roles("monthid", "group")
groups = task_$groups
rm(task_)
groups_v <- groups[, unique(group)]
train_length_out <- train_length + test_length
test_length_out <- 1
train_groups_out <- lapply(0:(length(groups_v)-(train_length_out+1)), function(x) x + (1:train_length_out))
test_groups_out <- lapply(train_groups_out, function(x) tail(x, 1) + test_length_out)
train_sets_out <- lapply(train_groups_out, function(x) groups[group %in% groups_v[x], row_id])
test_sets_out <- lapply(test_groups_out, function(x) groups[group %in% groups_v[x], row_id])
customo$instantiate(task, train_sets_out, test_sets_out)
# inspect custom cv's
custom$train_set(1)
custom$test_set(1)
(max(custom$train_set(1)) + 1) == head(custom$test_set(1), 1) # test set starts after train set
customo$train_set(1)
customo$test_set(1)
(max(customo$train_set(1)) + 1) == head(customo$test_set(1), 1) # test set starts after train set
all(c(custom$train_set(1), custom$test_set(1)) %in% customo$train_set(1)) # first outer set contains all inner sets
length(intersect(customo$test_set(1), c(custom$train_set(1), custom$test_set(1)))) == 0
# construct graph
graph = po("removeconstants", id = "removeconstants_1", ratio = 0) %>>%
po("branch", options = c("nop_prep", "yeojohnson", "pca", "ica"), id = "prep_branch") %>>%
gunion(list(po("nop", id = "nop_prep"), po("yeojohnson"), po("pca", scale. = TRUE), po("ica"))) %>>%
po("unbranch", id = "prep_unbranch") %>>%
po("learner", learner = lrn("classif.rpart"))
plot(graph)
graph_learner = as_learner(graph)
as.data.table(graph_learner$param_set)[1:70, .(id, class, lower, upper)]
search_space = ps(
prep_branch.selection = p_fct(levels = c("nop_prep", "yeojohnson", "pca", "ica")),
pca.rank. = p_int(2, 6, depends = prep_branch.selection == "pca"),
ica.n.comp = p_int(2, 6, depends = prep_branch.selection == "ica"),
yeojohnson.standardize = p_lgl(depends = prep_branch.selection == "yeojohnson")
)
at = auto_tuner(
method = "random_search",
learner = graph_learner,
resampling = custom,
measure = msr("classif.acc"),
search_space = search_space
)
# resampling
rr = resample(task, at, customo, store_models = TRUE)
I get an error:
INFO [09:46:49.340] [mlr3] Applying learner 'removeconstants_1.prep_branch.nop_prep.yeojohnson.pca.ica.prep_unbranch.classif.rpart.tuned' on task 'data_' (iter 1/2)
INFO [09:46:49.362] [mlr3] Applying learner 'removeconstants_1.prep_branch.nop_prep.yeojohnson.pca.ica.prep_unbranch.classif.rpart.tuned' on task 'data_' (iter 2/2)
Error: Train set 3 of inner resampling 'custom' contains row ids not present in task 'data_': {91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120}
The first outer training set contains these ids:
train_sets_out
#> [[1]]
#> [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90
This means rows 1 to 90 are available in the inner resampling.
The third training set of the inner resampling needs rows 61 to 120 but rows 91 to 120 are not available.
train_sets
#> [[3]]
#> [1] 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120
So that can't work. Check out figure 4.7 in the mlr3 book to see how the outer and inner resampling work together.
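A quick way to see this before calling resample() (a minimal sketch using the objects defined above) is to check that every row id used by the instantiated inner resampling is contained in every outer training set:

inner_ids <- unique(unlist(c(train_sets, test_sets)))
vapply(train_sets_out, function(tr) all(inner_ids %in% tr), logical(1))
# FALSE for both outer folds here, which is why resample() errors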
I attempted to apply linear regression and feature selection to predict a target variable (Y) from a set of predictor variables (X1, X2, X3, X4, X5, X6, X7, and X8). I started with a full model including all predictors, then used stepwise regression (backward, forward, and both directions) to select the most relevant variables. I compared the models using AIC, BIC, and root mean squared error (RMSE) to determine the best one, and finally used that model to predict Y for a specific set of predictor values, intending to compare it with the actual value to assess accuracy. However, the value of Y in the 39th semester is missing from my data, so I couldn't evaluate the prediction.
# Dataset: Classeur2.xlsx
setwd("D:/third year/alaoui")
# load
library(readxl)
data2 <- read_excel("D:/third year/alaoui/tpnote/Classeur2.xlsx")
data <- data2[-c(39),]
View(data)
# Descriptive analysis
summary(data)
str(data)
# Correlation analysis
#install.packages("psych")
library(psych)
# check that all columns are numeric; if not, convert them
num_cols <- sapply(data, is.numeric)
data[, !num_cols] <- lapply(data[, !num_cols], as.numeric)
#
matrice_correlation <- cor(data[,c("Y","X1","X2","X4","X5...5","X5...6","X6","X7","X8")])
KMO(matrice_correlation)
cortest.bartlett(matrice_correlation, n = nrow(data))
# Principal component analysis
library("FactoMineR")
library("factoextra")
library("corrplot")
p=PCA(data,graph=FALSE)
p
pca=PCA(data,ncp=2)
print(pca)
eig.val <- get_eigenvalue(pca)
eig.val
fviz_eig(pca)
fviz_pca_var(pca, col.var = "contrib", gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"))
# Linear regression
model <- lm(Y ~ X1 + X2 + X4 + X5...5 + X5...6 + X6 + X7 + X8, data = data)
summary(model)
# Check the linear regression assumptions
# 1. Linearity
par(mfrow = c(2, 2))
plot(model)
# 2. Homoscedasticity
library(car)
ncvTest(model)
# 3. Normality of residuals
library(lmtest)
library(tseries)
residuals <- resid(model)
qqnorm(residuals)
qqline(residuals)
shapiro.test(residuals)
# 4. Independence of residuals
plot(residuals ~ fitted(model))
durbinWatsonTest(model)
# Variable selection
# Fit the full model
full_model <- lm(Y ~ X1 + X2 + X4 + X5...5 + X5...6 + X6 + X7 + X8, data = data)
# Fit the null model (constant only)
null_model <- lm(Y ~ 1, data = data)
# Perform backward stepwise selection
backward_model <- step(full_model, direction = "backward")
# Perform forward stepwise selection
forward_model <- step(null_model, scope = list(lower = null_model, upper = full_model), direction = "forward")
# Perform both stepwise selection
both_model <- step(null_model, scope = list(upper = full_model), direction = "both")
# Compare AIC, BIC and RMSE for each model
AIC_full <- AIC(full_model)
AIC_backward <- AIC(backward_model)
AIC_forward <- AIC(forward_model)
AIC_both <- AIC(both_model)
BIC_full <- BIC(full_model)
BIC_backward <- BIC(backward_model)
BIC_forward <- BIC(forward_model)
BIC_both <- BIC(both_model)
RMSE_full <- sqrt(mean((resid(full_model))^2))
RMSE_backward <- sqrt(mean((resid(backward_model))^2))
RMSE_forward <- sqrt(mean((resid(forward_model))^2))
RMSE_both <- sqrt(mean((resid(both_model))^2))
#Print the model selection criteria for each model
cat("Full model:")
cat("\tAIC:", AIC_full, "\tBIC:", BIC_full, "\tRMSE:", RMSE_full, "\n")
cat("Backward model:")
cat("\tAIC:", AIC_backward, "\tBIC:", BIC_backward, "\tRMSE:", RMSE_backward, "\n")
cat("Forward model:")
cat("\tAIC:", AIC_forward, "\tBIC:", BIC_forward, "\tRMSE:", RMSE_forward, "\n")
cat("Both model:")
cat("\tAIC:", AIC_both, "\tBIC:", BIC_both, "\tRMSE:", RMSE_both, "\n")
#Select the best model (note: which.min below compares AIC only; BIC and RMSE could rank the models differently)
model_names <- c("Full Model", "Backward Model", "Forward Model", "Both Model")
best_model <- model_names[which.min(c(AIC_full, AIC_backward, AIC_forward, AIC_both))]
print(best_model)
# predict the value of Y in the 39th semester
predicted_Y <- predict(backward_model, newdata = data.frame(X1 = 500, X2 = 100, X4 = 83, X5...5 = 30, X5...6= 50, X6 = 90, X7 = 300, X8 = 200))
print(predicted_Y)
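# Sketch (not in the original script): newdata can instead be built from row 39
# of the original sheet, provided the column names in data2 match those used to
# fit the model; the actual Y there is missing, so the prediction cannot be
# scored against an observed value.
new_row <- data2[39, ]
print(predict(backward_model, newdata = new_row, interval = "prediction"))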
# to make sure that it's correct
#Calculate mean squared error (note: row 39 was removed when building `data`,
#so data$Y[39] is NA and the MSE/RMSE below will be NA as well)
MSE <- mean((predicted_Y - data$Y[39])^2)
#Calculate root mean squared error
RMSE <- sqrt(MSE)
#Calculate R-squared value
R_squared <- summary(backward_model)$r.squared
#Print the results
print(paste("Predicted value of Y:", predicted_Y))
print(paste("Mean Squared Error:", MSE))
print(paste("Root Mean Squared Error:", RMSE))
print(paste("R-Squared value:", R_squared))
#Compare the predicted value with the actual value
print(paste("Actual value of Y:", data$Y[39]))
print(paste("Difference:", abs(predicted_Y - data$Y[39])))
#Plot the model
par(xpd = TRUE)
plot(backward_model,which=1)
# abline(backward_model, col="blue")  # abline() only applies to single-predictor models, so it is disabled here
#Plot the residuals
plot(backward_model, which=2)
#Normality test on residuals
shapiro.test(residuals(backward_model))
#Homoscedasticity test on residuals
ncvTest(backward_model)
#Autocorrelation (Durbin-Watson) test on residuals
dwtest(backward_model)
This is my data file (excerpt; the duplicated X5 column is what read_excel renames to X5...5 and X5...6, and the final Y value is missing):
X4 X5 X5 X6 X7 X8 Y
56 12 50 77 229 98 5540
59 9 17 89 177 225 5439
57 29 89 51 166 263 4290
58 13 107 40 258 321 5502
59 13 143 52 209 407 4872
60 11 61 21 180 247 4708
60 25 -30 40 213 328 4627
60 21 -45 32 201 298 4110
63 8 -28 12 176 218 4123
62 11 76 68 175 410 4842
65 22 144 52 253 93 5741
65 24 113 77 208 307 5094
64 14 128 96 195 107 5383
66 15 10 48 154 305 4888
67 22 -25 27 181 60 4033
67 23 117 73 220 239 4942
66 13 120 62 235 141 5313
68 8 122 25 258 291 5140
69 27 71 74 196 414 5397
71 18 4 63 279 206 5149
69 8 47 29 207 80 5151
70 10 8 91 213 429 4989
73 27 128 74 296 273 5927
73 16 -50 16 245 309 4704
73 32 100 43 276 280 5366
75 20 -40 41 211 315 4630
73 15 68 93 283 212 5712
74 11 88 83 218 118 5095
74 27 27 75 307 345 6124
77 20 59 88 211 141 4787
79 35 142 74 270 83 5036
77 23 126 21 328 398 5288
78 36 30 26 258 124 4647
78 22 18 95 233 118 5316
81 20 42 93 324 161 6180
80 16 -22 50 267 405 4801
81 35 148 83 257 111 5512
82 27 -18 91 267 170 5272
83 30 50 90 300 200 .
I have a dataset of successes, probabilities, and sample sizes that I am running binomial tests on.
Here is a sample of the data (note that the actual dataset has me run >100 binomial tests):
km n_1 prey_pred p0_prey_pred
<fct> <dbl> <int> <dbl>
80 93 12 0.119
81 1541 103 0.0793
83 316 5 0.0364
84 721 44 0.0796
89 866 58 0.131
I normally run this (example for first row):
n=93
p0=0.119
successes=12
binom.test(successes, n, p0, "two.sided")
> Exact binomial test
data: 12 and 93
number of successes = 12, number of trials = 93, p-value = 0.74822
alternative hypothesis: true probability of success is not equal to 0.119
95 percent confidence interval:
0.068487201 0.214548325
sample estimates:
probability of success
0.12903226
Is there a way to systematically have it run multiple binomial tests on each row of data, and then storing all the output (p-value, confidence intervals, probability of success) as separate columns?
I've tried the solution proposed here, but I am clearly missing something.
Using apply.
res <- t(`colnames<-`(apply(dat, 1, FUN=function(x) {
rr <- binom.test(x[3], x[2], x[4], "two.sided")
with(rr, c(x, "2.5%"=conf.int[1], estimate=unname(estimate),
"97.5%"=conf.int[2], p.value=unname(p.value)))
}), dat$km))
res
# km n_1 prey_pred p0_prey_pred 2.5% estimate 97.5% p.value
# 80 80 93 12 0.1190 0.068487201 0.12903226 0.21454832 7.482160e-01
# 81 81 1541 103 0.0793 0.054881013 0.06683971 0.08047927 7.307921e-02
# 83 83 316 5 0.0364 0.005157062 0.01582278 0.03653685 4.960168e-02
# 84 84 721 44 0.0796 0.044688325 0.06102635 0.08106220 7.311463e-02
# 89 89 866 58 0.1310 0.051245893 0.06697460 0.08572304 1.656621e-09
Edit
If you have multiple column sets, in wide format (and for some reason want to stay there)
dat2 <- `colnames<-`(cbind(dat, dat[-1]), c("km", "n_1.1", "prey_pred.1", "p0_prey_pred.1",
"n_1.2", "prey_pred.2", "p0_prey_pred.2"))
dat2[1:3,]
# km n_1.1 prey_pred.1 p0_prey_pred.1 n_1.2 prey_pred.2 p0_prey_pred.2
# 1 80 93 12 0.1190 93 12 0.1190
# 2 81 1541 103 0.0793 1541 103 0.0793
# 3 83 316 5 0.0364 316 5 0.0364
you may do:
res2 <- t(`colnames<-`(apply(dat2, 1, FUN=function(x) {
rr1 <- binom.test(x[3], x[2], x[4], "two.sided")
rr2 <- binom.test(x[6], x[5], x[7], "two.sided")
rrr1 <- with(rr1, c("2.5%.1"=conf.int[1], estimate.1=unname(estimate),
"97.5%.1"=conf.int[2], p.value.1=unname(p.value)))
rrr2 <- with(rr2, c("2.5%.2"=conf.int[1], estimate.2=unname(estimate),
"97.5%.2"=conf.int[2], p.value.2=unname(p.value)))
c(x, rrr1, rrr2)
}), dat2$km))
res2
# km n_1.1 prey_pred.1 p0_prey_pred.1 n_1.2 prey_pred.2 p0_prey_pred.2 2.5%.1
# 80 80 93 12 0.1190 93 12 0.1190 0.068487201
# 81 81 1541 103 0.0793 1541 103 0.0793 0.054881013
# 83 83 316 5 0.0364 316 5 0.0364 0.005157062
# 84 84 721 44 0.0796 721 44 0.0796 0.044688325
# 89 89 866 58 0.1310 866 58 0.1310 0.051245893
# estimate.1 97.5%.1 p.value.1 2.5%.2 estimate.2 97.5%.2 p.value.2
# 80 0.12903226 0.21454832 7.482160e-01 0.068487201 0.12903226 0.21454832 7.482160e-01
# 81 0.06683971 0.08047927 7.307921e-02 0.054881013 0.06683971 0.08047927 7.307921e-02
# 83 0.01582278 0.03653685 4.960168e-02 0.005157062 0.01582278 0.03653685 4.960168e-02
# 84 0.06102635 0.08106220 7.311463e-02 0.044688325 0.06102635 0.08106220 7.311463e-02
# 89 0.06697460 0.08572304 1.656621e-09 0.051245893 0.06697460 0.08572304 1.656621e-09
One could write this in a more nested way, but I recommend keeping things simple, so that later others (and probably yourself) can understand what's going on.
Data:
dat <- read.table(text="km n_1 prey_pred p0_prey_pred
80 93 12 0.119
81 1541 103 0.0793
83 316 5 0.0364
84 721 44 0.0796
89 866 58 0.131 ", header=TRUE)
You can define a function for this as suggested in the comments:
my_binom <- function(x, n, p){
res <- binom.test(x, n, p)
out <- data.frame(res$p.value, res$conf.int[1], res$conf.int[2], res$estimate)
names(out) <- c("p", "lower_ci", "upper_ci", "p_success")
rownames(out) <- NULL
return(out)
}
Then you can apply it for each row
do.call("rbind.data.frame", apply(df, 1, function(row_i){
my_binom(x= row_i["prey_pred"], n= row_i["n_1"], p=
row_i["p0_prey_pred"])
}))
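A variant (a sketch, assuming the dat data frame from the answer above): Map() avoids apply()'s matrix coercion, which matters when a column such as km is a factor, because apply() would then coerce every column to character:

out <- do.call(rbind, Map(my_binom, dat$prey_pred, dat$n_1, dat$p0_prey_pred))
cbind(km = dat$km, out)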
I created two variables called "low.income" and "mid.income" from a survey; they are based on participants' income. Here is what the variables look like:
low.income <- c(75, 95, 85, 100, 85, 100, 85, 90, 75, 90, 65, 80, 85, 90, 85, 70, 95, 85, 100, 95, 85, 95, 90, 95, 95)
mid.income <- c(95, 100, 90, 90, 85, 95, 100, 95, 80)
But when I try to call aov(low.income ~ mid.income) it gives me:
Error in model.frame.default(formula = low.income ~ mid.income, drop.unused.levels = TRUE) :
variable lengths differ (found for 'mid.income')
So, what should I do?
That formula is not correct; I think you are looking for t.test, i.e.
t.test(low.income, mid.income, var.equal = TRUE)
To use the formula method, you have to create a dataframe with the level and the income. It should look like below:
data <- data.frame(level = rep(paste0(c("low","mid"),".income"),c(25,9)), income = c(low.income,mid.income))
level income
1 low.income 75
2 low.income 95
3 low.income 85
4 low.income 100
5 low.income 85
6 low.income 100
: : :
29 mid.income 90
30 mid.income 85
31 mid.income 95
32 mid.income 100
33 mid.income 95
34 mid.income 80
Now you could do:
t.test(income~level,data,var.equal = TRUE)
Since you are using aov, here is an example of how to do that:
aov(income~level,data)
These two will lead to the exact same result; you can run TukeyHSD on the aov fit to see that the results match.
NOTE: You only run an ANOVA when you have more than 2 groups. If you have only 2 groups, run a t-test. Recall that ANOVA is a generalization of the t-test.
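As a quick check (a sketch using the data frame built above): with two groups and pooled variance, the ANOVA F statistic is the square of the t statistic, so both tests return the same p-value.

t_res <- t.test(income ~ level, data, var.equal = TRUE)
a_tab <- summary(aov(income ~ level, data))[[1]]
all.equal(unname(t_res$statistic)^2, a_tab[["F value"]][1])  # TRUE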
I have some data to fit. The function is:
y = a*exp(b*x) + c*exp(d*x),
where a, b, c and d are the coefficients.
I want to use the gnm package to fit this double-exponential function, but the result does not seem good.
Is there another package that can do this?
Could Java or another language do it?
library(gnm);
data = read.table("F:\\AP\\R\\data.txt", header = T);
x <- data$X1;
y <- data$Y1;
set.seed(1);
saved.fits <- list();
for(i in 1:60){
saved.fits[[i]] <- suppressWarnings(gnm(y ~ Exp(1+x, inst = 1)+ Exp(1+x, inst =2),verbose = FALSE))
}
table(round(unlist(sapply(saved.fits, deviance)), 4))
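One alternative worth trying (a sketch, not from the original post): base R's nls() with the self-starting model SSbiexp fits the same double-exponential form, parameterised as A1*exp(-exp(lrc1)*x) + A2*exp(-exp(lrc2)*x), and usually converges without hand-picked starting values:

dd <- data.frame(x = x, y = y)
fit_nls <- try(nls(y ~ SSbiexp(x, A1, lrc1, A2, lrc2), data = dd))
if (!inherits(fit_nls, "try-error")) summary(fit_nls)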
X1:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114
Y1:
9514.833 9463.002 9386.4277 9320.292 9252.0957 9187.4775 9122.5947 9068.1279 9013.3232 8930.418
8875.416 8789.1973 8727.9355 8649.0547 8600.0693 8529.3359 8490.0801 8421.7842 8371.4688 8303.041
8256.1719 8193.1416 8159.1553 8091.3022 8028.9263 7966.6748 7893.2056 7819.4702 7710.0962 7613.6069
5609.2266 5573.5923 5537.665 5501.6279 5477.5825 5450.0518 5435.9521 5402.5327 5379.1743 5348.1226
5320.5049 5282.2158 5263.5146 5236.125 5216.4038 5188.0493 5170.293 5142.6416 5114.8125 5087.1606
5059.5898 5032.0352 5001.5537 4979.8364 4951.5854 4932.1138 4903.7363 4888.1841 4869.7168 4854.7617
I also have a question: can I use Matlab instead of R on the web server? I have to process electrical signals with methods such as filtering, smoothing, and fitting, but I am afraid the Matlab library will crash when concurrent tasks increase.