Different output from auto.arima when parallelization is enabled - r

The information outputted by auto.arima() when enabling parallelization seems to differ from that outputted when not using parallelization. Specifically, when parallelization is enabled, the output does not specify the values of p, d, and q nor report the AICc or BIC information criteria.
Example without parallelization:
> model <- auto.arima(ts,xreg=tax.ts,ic=c("bic"),stepwise=FALSE,approximation=FALSE)
Series: ts
ARIMA(0,1,1)
Coefficients:
ma1 tax
0.4589 -0.7035
s.e. 0.0806 0.6969
sigma^2 estimated as 0.6028: log likelihood=-138.36
AIC=282.72 AICc=282.92 BIC=291.08
With parallelization:
> model <- auto.arima(ts,xreg=tax.ts,ic=c("bic"),stepwise=FALSE,approximation=FALSE,parallel=TRUE,num.cores=12)
Call:
auto.arima(x = structure(list(rwhitebread = c(89.2749786376953, 89.2966079711914,
88.9023666381836, 88.6773300170898, 89.0641860961914, 89.6114883422852, 89.5789642333984,
89.3341522216797, 89.0179290771484, 88.4036026000977, 88.309211730957, 87.6592483520508,
87.3339614868164, 87.2614440917969, 87.7638092041016, 87.3240356445312, 88.6289749145508,
90.0923461914062, 90.2542266845703, 90.6454772949219, 90.7220764160156, 90.4558944702148,
90.8726654052734, 91.7483978271484, 92.7035140991211, 92.5734100341797, 92.4802780151367,
92.6397476196289, 93.3086471557617, 92.8956298828125, 92.6798477172852, 94.3108444213867,
96.7846298217773, 99.8294296264648, 100.150718688965, 103.013710021973, 103.426765441895,
102.606864929199, 102.679161071777, 103.586181640625, 106.069953918457, 105.177772521973,
104.857940673828, 104.737091064453, 104.414154052734, 103.719192504883, 101.936988830566,
101.443199157715, 101.572723388672, 101.843193054199, 101.829467773438, 101.567581176758,
102.02978515625, 101.891319274902, 101.730346679688, 101.729293823242, 101.258888244629,
100.753799438477, 100.468338012695, 100.157859802246, 99.6511154174805, 99.5886764526367,
98.7140121459961, 98.9120025634766, 99.6159439086914, 100.186874389648, 99.477912902832,
99.2161636352539, 99.1324768066406, 99.4329071044922, 99.8656692504883, 100.098243713379,
100.050888061523, 100.484085083008, 100.259925842285, 101.157867431641, 102.265960693359,
102.379020690918, 102.103042602539, 102.0927734375, 101.972709655762, 101.002868652344,
99.5803604125977, 99.2621994018555, 100.411376953125, 101.165588378906, 101.440376281738,
101.992752075195, 102.306083679199, 102.090476989746, 101.909233093262, 105.202362060547,
108.684272766113, 108.776985168457, 108.79345703125, 108.924339294434, 108.688865661621,
108.240608215332, 107.647567749023, 106.971420288086, 107.167625427246, 106.995262145996,
107.178039550781, 107.176040649414, 106.977165222168, 106.254096984863, 104.939926147461,
104.52613067627, 104.082489013672, 104.322189331055, 103.974395751953, 104.827789306641,
105.576438903809, 105.718490600586, 105.364852905273, 105.030937194824, 104.75341796875,
103.981002807617, 103.218376159668, 102.868370056152, 102.812316894531)), .Names = "rwhitebread", row.names = c(NA,
-121L), class = "data.frame"), ic = c("bic"), stepwise = FALSE, approximation = FALSE,
xreg = tax.ts, parallel = TRUE, num.cores = 12)
Coefficients:
ma1 tax
0.4589 -0.7035
s.e. 0.0806 0.6969
sigma^2 estimated as 0.6028: log likelihood = -138.36, aic = 282.72
Is there any way to get the same output from the parallelized command, or otherwise extract the parameters used in the model and the AICc/BIC?

Related

raster.kendall function on spatialEco not EnvStats package

Recently I came across the spatialEco library in R. I want to calculate the Kendall tau statistic for a raster stack in R, but I get an error saying to "install EnvStats package before running this function",
when I install this package, then get another Error :"Error in (function (y, x = seq(along = y), alternative = "two.sided", :
When ci.slope=TRUE, there must be at least 3 non-missing, finite observations"
> files_Bio1 <- list.files(path="e:/Pastclim/trend/Bio1/", pattern='asc$', full.names=TRUE)
> Current_Bio2 <- stack(files_Bio2)
> names(Current_Bio2)
[1] "Bio1_370Kbp" "Bio1_380Kbp" "Bio1_390Kbp" "Bio1_400Kbp" "Bio1_410Kbp"
[6] "Bio1_420Kbp"
> library(spatialEco)
> m_Bio1 <- spatialEco::raster.kendall(Current_Bio2, intercept = FALSE, p.value = FALSE, confidence = FALSE,
+ tau = FALSE)
Error in spatialEco::raster.kendall(Current_Bio2, intercept = FALSE, p.value = FALSE, :
please install EnvStats package before running this function

Arima loop vs Arima function [R]

I am trying to build a function where I estimate a lot of arima models with a for loop.
I got the for loops running and getting me my desired output, but as soon as I try to get my code into the function I get errors.
This is the loop:
# Grid-search ARIMA(p, d, q) fits over p in 0:4, d in 0:2, q in 0:4 and
# collect in-sample accuracy measures plus log-likelihood, AIC, and BIC
# for each order.
# Results are accumulated in a list and bound once at the end: growing a
# data.frame with rbind() inside a loop copies the whole frame on every
# iteration (O(n^2) in the number of models).
# Depends on forecast::accuracy() and dplyr::select() being attached.
results <- list()
for (p in 0:4) {
  for (d in 0:2) {
    for (q in 0:4) {
      model <- arima(data, order = c(p, d, q), method = "ML")
      acc <- as.data.frame(accuracy(model))
      acc_ext <- data.frame(acc,
                            loglikeli = logLik(model),
                            AIC = AIC(model),
                            BIC = BIC(model),
                            order = paste(p, d, q, sep = ","))
      acc_ext <- select(acc_ext,
                        ME, RMSE, MAE, MAPE, loglikeli, AIC, BIC, order)
      results[[length(results) + 1L]] <- acc_ext
    }
  }
}
# Single bind at the end keeps the same final object name and shape.
model_acc <- do.call(rbind, results)
I am aware that there are some models that cannot be computed with Maximum Likelihood, due to the constraints in the optimization. But this loop gets me 61 models out of 75 (just with method="CSS"). I get the models that could be computed.
So the parameters I'd like to move are: data, p_max, d_max, q_max, and, method.
So the function goes like this:
# Fit ARIMA(p, d, q) models for every order p in 0:p_max, d in 0:d_max,
# q in 0:q_max and return a data.frame of in-sample accuracy measures
# plus log-likelihood, AIC, BIC, and the order string "p,d,q".
#
# Each fit is wrapped in tryCatch so that orders whose optimization fails
# (e.g. the "non-finite finite-difference value" error from optim) are
# skipped instead of aborting the whole grid search -- this is why the
# bare function version stopped with an error while interactive runs of
# the loop "worked": inside a function the first uncaught error unwinds
# the call and discards all accumulated results.
#
# Arguments:
#   data   -- a univariate time series accepted by stats::arima()
#   p_max, d_max, q_max -- upper bounds of the order grid (lower bound 0)
#   method -- estimation method passed to arima(), e.g. "ML" or "CSS"
# Depends on forecast::accuracy() and dplyr::select() being attached.
which_arima <- function(data, p_max, d_max, q_max, method) {
  # Accumulate per-model rows in a list; bind once at the end to avoid
  # the O(n^2) cost of rbind-ing a growing data.frame inside the loop.
  results <- list()
  for (p in 0:p_max) {
    for (d in 0:d_max) {
      for (q in 0:q_max) {
        acc_ext <- tryCatch({
          model <- arima(data, order = c(p, d, q), method = method)
          acc <- as.data.frame(accuracy(model))
          row <- data.frame(acc,
                            loglikeli = logLik(model),
                            AIC = AIC(model),
                            BIC = BIC(model),
                            order = paste(p, d, q, sep = ","))
          select(row, ME, RMSE, MAE, MAPE, loglikeli, AIC, BIC, order)
        }, error = function(e) NULL)  # skip non-convergent orders
        if (!is.null(acc_ext)) {
          results[[length(results) + 1L]] <- acc_ext
        }
      }
    }
  }
  do.call(rbind, results)
}
a <- which_arima(data, p_max=4, d_max=2, q_max=4, method="ML")
But when I execute it, I get this error (referring to the models that could not be computed) and don't get anything. (in the for loop only I got the models that could be computed).
Error in optim(init[mask], armafn, method = optim.method, hessian = TRUE, :
non-finite finite-difference value [4]
In addition: Warning messages:
1: In arima(data, order = c(p, d, q), method = method) :
possible convergence problem: optim gave code = 1
2: In arima(data, order = c(p, d, q), method = method) :
possible convergence problem: optim gave code = 1
3: In log(s2) : NaNs produced
4: In log(s2) : NaNs produced
Called from: optim(init[mask], armafn, method = optim.method, hessian = TRUE,
control = optim.control, trans = as.logical(transform.pars))
Browse[1]> Q
What is going wrong? Because without the function environment is working "fine". And more importantly, how can I solve this?
Thanks in advance!!
Here is the data:
> dput(data)
structure(c(1.04885832686158, 1.06016074629379, 1.0517956106758,
1.02907998600003, 1.05054370620123, 1.07261670636915, 1.0706491823234,
1.0851355199628, 1.08488055975672, 1.08085233559646, 1.081489249884,
1.08587205516048, 1.07249155362154, 1.05497731364761, 1.05675866316574,
1.06428371643968, 1.06065865122313, 1.05621234529568, 1.05339905298902,
1.05787030302435, 1.0658034000068, 1.08707776713932, 1.08626056161822,
1.10238697375394, 1.11390088086972, 1.12120513732074, 1.11937921359653,
1.10341241626668, 1.1156190247407, 1.12376155972358, 1.12411603174635,
1.12183475077377, 1.12994175229071, 1.12956170931204, 1.12199732095331,
1.11645064755987, 1.12481242467782, 1.13066151473637, 1.13028712061827,
1.12694056065497, 1.12382226475179, 1.12352013167586, 1.13391069257413,
1.14763982976838, 1.14481816405703, 1.14852949174863, 1.14182560351963,
1.14086563926171, 1.14491904045717, 1.14897189333479, 1.14616964486707,
1.15074750127031, 1.14681353487065, 1.11151754535415, 1.10497749493861,
1.10963378437214, 1.12415745716768, 1.17507535290893, 1.20285968503846,
1.22784769136553, 1.23940795216891, 1.254741010879, 1.29442450660416,
1.30428779451896, 1.31314618462517, 1.32544236970695, 1.33728107423435,
1.34408499591568, 1.34199331033196, 1.34027541040719, 1.33616830504407,
1.33360421057602, 1.33332422301893, 1.34717794252774, 1.3502492092262,
1.35168291803248, 1.35827816606688, 1.36772644852242, 1.36755741578293,
1.36926148542701, 1.37264481021763, 1.37322962601678, 1.37643913938007,
1.37906284181634, 1.37644362054554, 1.38911039237937, 1.39412557349575,
1.40094895608589, 1.40630864159528, 1.40823485306921, 1.4138446752069,
1.42340582796496, 1.43641264727375, 1.43605231080207, 1.44839810240334,
1.45451041581127, 1.46166006472498, 1.46774816064695, 1.46930608347752,
1.47885183796249, 1.49059366171423, 1.49849145403671, 1.51209667142067,
1.5250141727637, 1.5392257264567, 1.55144303632514, 1.56488453313021,
1.58308777691125, 1.59737589266492, 1.60896279958586, 1.62553339664661,
1.63594174408691, 1.65233080464302, 1.67114336171075, 1.6897476078746,
1.71673790971729, 1.74453973794979, 1.76317526009814, 1.79187692264759,
1.84186982937622, 1.9460629324144, 2.05986108970089, 2.06767436493269,
2.0783176148561, 2.08271855277262, 2.09358626977224, 2.09674958523685,
2.11582742548029, 2.12810020369675, 2.13596929171732, 2.13972610568317,
2.14456803530813, 2.15013985201827, 2.16007349878874, 2.17165498940627,
2.18057666565755, 2.19162746118342, 2.20308765886345, 2.21304799942168,
2.22367586966847, 2.23629862083737, 2.24751866055731, 2.26100586740225,
2.40972893063106, 2.60366275683037, 2.68572993101095, 2.70501080420283,
2.6676315643757, 2.6479269687206, 2.64641010174172, 2.69966594490103,
2.69665303568271, 2.71396750774502, 2.71900427132191, 2.72876269360869,
2.76276620421252, 2.76620189252239, 2.74632816231219, 2.74196673817286,
2.72905831066292, 2.75190757584346, 2.77801573354251, 2.84089580821293,
2.85681823660541, 2.84754572013613, 2.85858396073969, 2.86184353545653,
2.86958309986952, 2.94279115543111, 2.98631808884879, 3.00648449252989,
3.00620698598987, 3.15207693676406, 3.27614511764022, 3.32011714920345,
3.39367422894347, 3.64822360464499, 3.61835354049394, 3.59374251055335,
3.63237359915986, 3.62209957896007, 3.64554153297999, 3.71611226971083,
3.76031231050606, 3.80307769833913, 3.77959145461296, 3.74772344909971,
3.95072671083008, 4.03652777624058, 4.06630193640976, 4.08838169421096,
4.09074775372752, 4.09286687677964, 4.11466378890098, 4.14350067096966,
4.18153835521181, 4.21299240125327, 4.23975062689892, 4.26683207875595,
4.29265054707555, 4.31835343358436, 4.34946580314932, 4.37865522989399,
4.41294135451665), .Dim = c(204L, 1L), .Dimnames = list(NULL,
"price"), .Tsp = c(2004, 2020.91666666667, 12), class = "ts")
We could add a tryCatch in the function
# Answer version of which_arima: same grid search, with each arima() fit
# wrapped in tryCatch so non-convergent orders are skipped rather than
# aborting the function. Improved here to accumulate rows in a list and
# bind once at the end -- rbind() on a growing data.frame inside a loop
# copies the whole frame on every iteration (O(n^2)).
#
# Arguments:
#   data   -- a univariate time series accepted by stats::arima()
#   p_max, d_max, q_max -- upper bounds of the order grid (lower bound 0)
#   method -- estimation method passed to arima(), e.g. "ML" or "CSS"
# Returns a data.frame with one row per successfully fitted model.
# Depends on forecast::accuracy() and dplyr::select() being attached.
which_arima <- function(data, p_max, d_max, q_max, method) {
  results <- list()
  for (p in 0:p_max) {
    for (d in 0:d_max) {
      for (q in 0:q_max) {
        # On error the handler returns NULL and the order is skipped,
        # matching the original's behavior of discarding failed fits.
        row <- tryCatch({
          model <- arima(data, order = c(p, d, q), method = method)
          acc <- as.data.frame(accuracy(model))
          acc_ext <- data.frame(acc,
                                loglikeli = logLik(model),
                                AIC = AIC(model),
                                BIC = BIC(model),
                                order = paste(p, d, q, sep = ","))
          select(acc_ext, ME, RMSE, MAE, MAPE, loglikeli, AIC, BIC, order)
        }, error = function(e) NULL)
        if (!is.null(row)) {
          results[[length(results) + 1L]] <- row
        }
      }
    }
  }
  do.call(rbind, results)
}
-testing
a <- which_arima(data, p_max=4, d_max=2, q_max=4, method="ML")
-output
> a
ME RMSE MAE MAPE loglikeli AIC BIC order
Training set 3.916595e-14 1.00150757 0.84665890 47.3734354 -289.77077 583.54155 590.17779 0,0,0
Training set1 1.507413e-03 0.50685119 0.42608540 23.8330904 -153.49920 312.99840 322.95276 0,0,1
Training set2 1.477754e-03 0.27462038 0.23150111 12.9162286 -31.20907 70.41814 83.69062 0,0,2
Training set3 1.349691e-03 0.16826013 0.13265807 7.3234273 67.17326 -124.34652 -107.75592 0,0,3
Training set4 1.205197e-03 0.12347033 0.09404764 5.1708085 132.56865 -253.13729 -233.22857 0,0,4
Training set5 1.649574e-02 0.03945226 0.02063318 0.9365795 367.68785 -733.37570 -730.06250 0,1,0
Training set6 1.103986e-02 0.03456075 0.01736215 0.7957414 394.41586 -784.83172 -778.20531 0,1,1
Training set7 1.033720e-02 0.03443721 0.01713550 0.7848747 395.13798 -784.27595 -774.33634 0,1,2
Training set8 9.546954e-03 0.03417545 0.01651661 0.7683963 396.69035 -785.38071 -772.12788 0,1,3
Training set9 8.268413e-03 0.03353547 0.01710311 0.7855244 400.43015 -790.86030 -774.29427 0,1,4
Training set10 1.081905e-04 0.03982073 0.01849307 0.8429273 363.50025 -725.00049 -721.69223 0,2,0
Training set11 2.800510e-03 0.03429965 0.01750163 0.8103622 392.52320 -781.04639 -774.42986 0,2,1
Training set12 2.920421e-03 0.03214346 0.01515181 0.7129633 405.66898 -805.33795 -795.41315 0,2,2
Training set13 2.915234e-03 0.03206868 0.01541923 0.7234715 406.11610 -804.23221 -790.99914 0,2,3
Training set14 2.915216e-03 0.03206786 0.01543761 0.7239875 406.11852 -802.23704 -785.69571 0,2,4
Training set15 1.609540e-02 0.03954680 0.02075934 0.9489873 365.76961 -725.53923 -715.58487 1,0,0
Training set16 1.067822e-02 0.03464237 0.01747532 0.8057485 392.50610 -777.01221 -763.73973 1,0,1
Training set17 7.714409e-03 0.03500020 0.01712196 0.8100354 390.85979 -771.71958 -755.12898 1,0,2
Training set18 9.510129e-03 0.03417676 0.01653561 0.7702435 398.64834 -785.29668 -765.38796 1,0,3
Training set19 9.299540e-03 0.03407723 0.01644942 0.7661016 399.22596 -784.45192 -761.22508 1,0,4
Training set20 8.521452e-03 0.03440107 0.01658612 0.7665062 395.36364 -786.72729 -780.10088 1,1,0
Training set21 9.502976e-03 0.03434348 0.01673934 0.7705014 395.69269 -785.38538 -775.44577 1,1,1
Training set22 3.638516e-03 0.03220174 0.01508764 0.7126483 408.13770 -808.27541 -795.02259 1,1,2
Training set23 3.626362e-03 0.03212825 0.01534293 0.7227711 408.58054 -807.16108 -790.59505 1,1,3
Training set24 8.353323e-03 0.03319389 0.01722817 0.8063780 402.38983 -792.77965 -772.90042 1,1,4
Training set25 1.322429e-04 0.03862934 0.01853910 0.8452931 369.60607 -735.21213 -728.59560 1,2,0
Training set26 2.950783e-03 0.03271462 0.01554742 0.7271035 402.15143 -798.30287 -788.37806 1,2,1
Training set27 2.918645e-03 0.03207500 0.01535616 0.7214170 406.08191 -804.16382 -790.93075 1,2,2
Training set28 2.915432e-03 0.03206844 0.01542446 0.7236258 406.11678 -802.23356 -785.69222 1,2,3
Training set29 2.892408e-03 0.03184546 0.01585528 0.7398747 407.44682 -802.89365 -783.04404 1,2,4
Training set30 3.275778e-02 0.06802502 0.03811120 1.7010099 257.85907 -507.71814 -494.44566 2,0,0
Training set31 9.458801e-03 0.03434793 0.01677640 0.7737224 397.64430 -785.28860 -768.69800 2,0,1
Training set32 1.041047e-02 0.03449857 0.01757479 0.8092751 393.34656 -774.69312 -754.78440 2,0,2
Training set33 1.036041e-02 0.03438249 0.01712067 0.7851881 397.17474 -780.34949 -757.12265 2,0,3
Training set34 9.291907e-03 0.03413569 0.01668650 0.7780739 395.47305 -774.94611 -748.40115 2,0,4
Training set35 8.657322e-03 0.03439622 0.01656361 0.7657220 395.39192 -784.78384 -774.84422 2,1,0
Training set36 8.975188e-03 0.03415064 0.01646538 0.7625588 396.82841 -785.65683 -772.40401 2,1,1
Training set37 3.623756e-03 0.03213180 0.01528195 0.7207391 408.54688 -807.09376 -790.52773 2,1,2
Training set38 3.632392e-03 0.03218922 0.01509295 0.7124041 408.20813 -804.41627 -784.53703 2,1,3
Training set39 3.593594e-03 0.03190425 0.01582339 0.7407521 409.90942 -805.81883 -782.62639 2,1,4
Training set40 2.046999e-04 0.03534316 0.01743186 0.8004223 387.39069 -768.78138 -758.85658 2,2,0
Training set41 2.900379e-03 0.03229554 0.01543942 0.7252999 404.68622 -801.37243 -788.13936 2,2,1
Training set42 2.912130e-03 0.03206051 0.01549744 0.7258632 406.16110 -802.32220 -785.78086 2,2,2
Training set43 2.748199e-03 0.03106662 0.01710118 0.8027724 411.95382 -811.90764 -792.05804 2,2,3
Training set44 2.757572e-03 0.03048849 0.01571731 0.7454319 413.92678 -813.85355 -790.69568 2,2,4
Training set45 8.190706e-03 0.03447649 0.01665674 0.7750253 393.49946 -776.99891 -760.40831 3,0,0
Training set46 8.485733e-03 0.03422971 0.01656490 0.7726290 394.93100 -777.86199 -757.95327 3,0,1
Training set47 9.212683e-03 0.03436951 0.01678990 0.7781612 393.54762 -773.09523 -749.86839 3,0,2
Training set48 8.721991e-03 0.03406162 0.01638032 0.7597073 399.31535 -782.63070 -756.08574 3,0,3
Training set49 -1.095108e-03 0.03200273 0.01626031 0.8020173 407.59681 -797.19361 -767.33053 3,0,4
Training set50 6.642458e-03 0.03334238 0.01646485 0.7579776 401.61737 -795.23474 -781.98192 3,1,0
Training set51 3.614071e-03 0.03235398 0.01536258 0.7247132 407.14878 -804.29756 -787.73153 3,1,1
Training set52 3.626052e-03 0.03212051 0.01541776 0.7251026 408.62481 -805.24962 -785.37038 3,1,2
Training set53 3.434470e-03 0.03112232 0.01708791 0.8047847 414.43876 -814.87751 -791.68507 3,1,3
Training set54 3.429177e-03 0.03037721 0.01633882 0.7892697 417.28525 -818.57050 -792.06486 3,1,4
Training set55 2.343659e-04 0.03506668 0.01740936 0.7937255 388.95388 -769.90777 -756.67470 3,2,0
Training set56 2.921378e-03 0.03207232 0.01556489 0.7249596 406.11547 -802.23095 -785.68961 3,2,1
Training set57 2.923439e-03 0.03200307 0.01554917 0.7264361 406.53973 -801.07945 -781.22984 3,2,2
Training set58 2.772438e-03 0.03033715 0.01644824 0.7949022 414.69079 -815.38158 -792.22370 3,2,3
Training set59 2.758142e-03 0.03032083 0.01638087 0.7893286 414.81461 -813.62923 -787.16309 3,2,4
Training set60 6.105981e-03 0.03341497 0.01657335 0.7683822 399.75129 -787.50258 -767.59386 4,0,0
Training set61 7.918597e-03 0.03223310 0.01733992 0.8410424 404.54182 -793.08364 -766.53868 4,0,2
Training set62 3.580192e-03 0.03210903 0.01545304 0.7266964 410.69767 -803.39533 -773.53225 4,0,3
Training set63 9.682367e-03 0.03234607 0.01684835 0.8031973 407.24757 -794.49514 -761.31394 4,0,4
Training set64 6.558516e-03 0.03333914 0.01646740 0.7571758 401.63677 -793.27354 -776.70751 4,1,0
Training set65 6.614327e-03 0.03334172 0.01646714 0.7576681 401.62115 -791.24231 -771.36307 4,1,1
Training set66 3.601945e-03 0.03225054 0.01523192 0.7204685 407.38418 -800.76837 -777.57593 4,1,2
Training set67 3.435674e-03 0.03038226 0.01636894 0.7939351 417.16875 -818.33749 -791.83184 4,1,3
Training set68 3.441183e-03 0.03057401 0.01590164 0.7605592 416.07385 -814.14770 -784.32885 4,1,4
Training set69 2.783446e-04 0.03453279 0.01699742 0.7813553 391.99285 -773.98571 -757.44437 4,2,0
Training set70 2.922130e-03 0.03191875 0.01548585 0.7279757 407.03673 -802.07347 -782.22386 4,2,1
Training set71 2.921712e-03 0.03191246 0.01550785 0.7286895 407.07153 -800.14306 -776.98519 4,2,2
Training set72 2.757144e-03 0.03032018 0.01638662 0.7906756 414.79253 -813.58506 -787.11892 4,2,3
Training set73 2.776647e-03 0.03052505 0.01588780 0.7626634 413.36293 -808.72586 -778.95146 4,2,4

Checking residuals (from ETS+STL method) with checkresiduals() function

I have one ts object which contain one column with weekly data (freqency = 52) for the period 2016-2019(only one week from 2019).
#>TEST_1
#>Time Series:
#>Start = c(2016, 1)
#>End = c(2019, 1)
#>Frequency = 52
So I am performing a forecast on this ts object with the forecast() function from the forecast package. This function gives me the selection of the best ETS (exponential smoothing) model for my series.
Forecast method: STL + ETS(M,A,N)
Model Information:
ETS(M,A,N)
Call:
ets(y = x, model = etsmodel, allow.multiplicative.trend = allow.multiplicative.trend)
Smoothing parameters:
alpha = 0.0044
beta = 0.0044
Initial states:
l = 496.0001
b = -0.7495
sigma: 0.2538
AIC AICc BIC
2328.009 2328.406 2343.290
But here arise a new problem for me. Namely I trying to perform residual diagnostics for residuals from this model with function checkresiduals() but I receive this message.
#> Warning message:
#> In modeldf.default(object) :
#> Could not find appropriate degrees of freedom for this model.
So can anybody help me how to find appropriate degrees of freedom for this model
with checkresiduals() function? Below is data from residuals.
residuals<-structure(c(103.861587225712, 232.922530738897, -177.501044573567,
-32.3310448885088, 51.8658720663952, -127.669525632371, -21.3736988850188,
31.8283388622758, 134.388167819753, -202.279672375648, -150.211885150427,
59.7872220312138, 7.21928088178879, -31.0067512774922, 240.664063232754,
-259.693899860492, 51.2068097649542, 133.051059120384, 153.754774108432,
-245.448120335887, -41.7151580882252, 329.736089553496, -176.574681226445,
-5.49877539363433, -57.9440644242901, -141.920372666123, 59.631632197218,
30.3566233456523, -19.5674149569647, 49.8299466802158, 8.08039437858747,
-179.219757481181, 61.6262480548803, 14.2886335749734, 147.521659709062,
-203.114556948222, 232.39658682842, 17.0359701527633, 122.671792930753,
1.17404214154658, -21.3604900851155, 43.6067134825538, 56.6694972222097,
-74.206099457236, 22.2154797604099, -42.6209506582884, -69.0881062270763,
44.9935627424999, -65.4843011281191, 45.9859871219855, 38.48475732006,
217.607886572158, -81.752879329815, -62.3165846738133, 91.3280029935076,
13.8065979268541, -27.5160607993942, -2.45614326754531, 8.82428074173083,
-21.9816546447523, 58.6350169306539, 2.99591624137327, 25.4548944489055,
-7.80971451574547, -33.741824891111, 148.727324165574, -103.887619405031,
13.6976122890256, -6.22642628362576, -89.0151943344358, 151.68500527824,
113.373271376477, 165.103295852743, -295.039665234726, 213.698114407198,
-76.4034402042766, -9.34573346398901, -71.4103830503603, 122.800589573655,
-55.724016585403, 63.7939569095491, 44.9784699409192, 151.519180259845,
-58.4408170188741, -74.3037359893916, -47.7713298497972, 163.367074626196,
-249.379445021869, -112.112655284116, -43.5458433646284, -53.5666005867634,
281.491207440336, -121.212142480196, -33.9138735682901, -31.1438180301793,
-31.2555698825003, 20.3181357200996, -46.2564548372715, 19.2769399131227,
82.0903051423776, -53.9874588993755, -81.7381076026692, -109.42037514781,
-128.567530337503, 239.606771386708, -163.928615298084, 88.3650587021525,
22.3840519205474, -19.7936259061341, 133.392615761316, 14.8789465334592,
-7.35384302392632, -193.309220279654, 199.807229000058, 124.081926626315,
-52.3795507957004, 26.248230162833, -123.352126375918, -136.687848362162,
242.06397333675, -49.2896526387001, -47.0413692896267, -315.639803224046,
122.111855110991, -135.453045844048, -34.9514109509343, -51.0671430546247,
75.2304903204274, 58.5168476811577, 205.900859581612, -195.231017102347,
17.0666471041718, -55.7835085816988, -105.931678098968, -173.52733115843,
229.313605012801, 4.76417288414814, 24.9291766474627, -324.904858037879,
449.500524512662, -126.709163220759, 18.7291455153395, -76.1328146141673,
-298.217791616455, 137.973841964018, -16.2916958267025, -31.8650948708939,
99.4876416447454, -49.4760819558044, 84.1071094148195, 44.155870901787,
-133.53348599245, 117.30321085781, 35.0222913102854, 71.5981819455558,
-87.2032279610021, -272.900607282635), .Tsp = c(2016, 2019, 52
), class = "ts")
The degrees of freedom are: DF = (# lags to be tested) - (# parameters that you estimate).
You can manually set the degrees of freedom using:
checkresiduals(..., df=DF)
Hope it works.
Goosse

How can I use TexReg (1.36.4) for a "relogit" model estimated using Zelig (v. 5.0-13)?

I know that Zelig is a wrapper... But still, it provides nice simulation capabilities (which I wouldn't be able to do on my own).
Lets say I have this data,
set.seed(123)
x1 = rnorm(5)
x2 = rnorm(5)
z = 1 + 2*x1 + 3*x2
pr = 1/(1+exp(-z))
y = rbinom(5,1,pr)
df = data.frame(y=y,x1=x1,x2=x2)
Now, we estimate the model,
library(Zelig)
relogit <- zelig(y ~ x1 + x2, model = "relogit", data = df)
And now, we (try to) make the table
library(texreg)
texreg(relogit)
... only to get this error.
Error in (function (classes, fdef, stable):
unable to find an inherited method for function ‘extract’ for
signature ‘"Zelig-relogit"’
I am aware of the $getvcov() and $getcoef() functions. But I wonder how I could make a straightforward table using texreg. Any advice will be greatly appreciated. Thanks!
texreg uses a generic function called extract to pull the relevant data from a model object and then processes the resulting texreg object to create a regression table. In order to extend the range of models texreg is applicable to, you can write your own methods for the extract function.
Zelig-relogit objects apparently store a glm object with the relevant data somewhere inside the object and attach a different class name to it. So it should be relatively straightforward to create a copy of this sub-object, fix its class name, and apply the existing extract.glm method to this object to extract the data. More specifically:
# extension for Zelig-relogit objects (Zelig package >= 5.0)
# extract() method for Zelig-relogit objects (Zelig package >= 5.0) so
# that texreg/screenreg/htmlreg can render them. Zelig keeps the fitted
# glm-like object at model$zelig.out$z.out[[1]] under a different class
# name; we relabel it as a plain "glm" and delegate to the existing
# extract.glm method.
extract.Zeligrelogit <- function(model, include.aic = TRUE, include.bic = TRUE,
include.loglik = TRUE, include.deviance = TRUE, include.nobs = TRUE, ...) {
  fit <- model$zelig.out$z.out[[1]]
  class(fit) <- "glm"
  extract(fit,
          include.aic = include.aic,
          include.bic = include.bic,
          include.loglik = include.loglik,
          include.deviance = include.deviance,
          include.nobs = include.nobs,
          ...)
}
# Register the method for S4 dispatch on "Zelig-relogit" objects.
setMethod("extract", signature = className("Zelig-relogit", "Zelig"),
          definition = extract.Zeligrelogit)
This code creates a Zelig-relogit method for the extract function. You can use it by typing something like screenreg(relogit), where relogit is the name of your Zelig-relogit object. The result should look like this:
==================================
Model 1
----------------------------------
(Intercept) -9446502571.59 ***
(62615.78)
x1 19409089045.70 ***
(141084.20)
x2 856836055.47 ***
(98175.65)
----------------------------------
AIC 6.00
BIC 4.83
Log Likelihood -0.00
Deviance 0.00
Num. obs. 5
==================================
*** p < 0.001, ** p < 0.01, * p < 0.05
More generally, if you want to make any Zelig model work with texreg, you should look at model$zelig.out$z.out[[1]] to find the relevant information. I will include the Zelig-relogit extract method in the next texreg release.

Caret Package with "nnet" see weight of hidden layer

I'm using the caret package to train a model using the "nnet" method. It's working, but I need to see the weights used in the hidden layers.
This is possible when we use the nnet function directly:
model<-nnet(Data[5:8], Data[4],size=10,maxit=100000,linout=T,decay=0.1)
model$wts
[1] 9.160050e-01 1.184379e+00 -1.201645e+00 1.041427e+00 -2.367287e-03 6.861753e+00 1.223522e+00 -1.875841e+01 -1.233203e-02
[10] 5.281464e-01 -1.605204e+00 1.497933e+00 -2.882815e+00 -1.511277e+01 2.732411e-01 -2.999315e+01 1.498460e-01 -9.405826e-01
[19] -2.800337e+00 9.600647e-02 1.588405e+00 -2.106175e+00 -8.807753e+00 2.762392e+01 2.091118e-01 3.265564e+01 6.516821e-01
[28] 1.304455e-01 -7.633166e+00 1.017017e-02 6.366411e+01 -2.902564e-02 1.376147e-01 -8.353788e+00 6.376588e-04 5.995577e+00
[37] 1.176301e+01 -8.569926e+00 1.971122e+01 -2.358067e-01 3.971781e+01 1.940421e-01 1.755913e-01 -5.817047e+00 1.988909e-03
[46] 1.408106e+00 -1.549250e+00 1.757245e+01 -5.760102e+01 1.001197e+00 -5.493371e+00 4.786298e+00 6.049659e+00 -1.762611e+01
[55] -9.598485e+00 -1.716196e+01 6.477683e+00 -1.971476e+01 4.468062e+00 2.125993e+01 4.683170e+01
How can i see the weights when using the caret package?
mynnetfit <- train(DC ~ T+c+f+h, data = Data1, method = "nnet",
maxit = 1000, tuneGrid = my.grid, trace = T, linout = 1, trControl = ctrl)
The model object mynnetfit has a finalModel component, which is of class nnet. You can then coef(mynnetfit$finalModel) to get the weights of the nodes.
For example
library(caret)
## simulate data
set.seed(1)
dat <- LPH07_2(100, 20)
mod <- train(y ~ ., data=dat, method="nnet", trace=FALSE, linout=TRUE)
coef(mod$finalModel)
b->h1 i1->h1 i2->h1 i3->h1 i4->h1 i5->h1 i6->h1 i7->h1 i8->h1 i9->h1
-0.7622230 8.5760791 9.6162685 -13.0549859 5.3306854 8.1679126 3.1832575 -5.4354694 4.8410017 -6.3811887
i10->h1 i11->h1 i12->h1 i13->h1 i14->h1 i15->h1 i16->h1 i17->h1 i18->h1 i19->h1
7.0813781 3.4709351 5.6444663 4.2530566 0.6594511 0.5579828 23.5802215 5.0381758 -0.4883967 -13.0613378
i20->h1 i21->h1 i22->h1 i23->h1 i24->h1 i25->h1 i26->h1 i27->h1 i28->h1 i29->h1
11.8905272 -0.2732984 -4.5190578 -2.3095693 0.8891562 1.7922645 -0.4666446 -1.0980723 -4.7742597 -5.1603453
i30->h1 i31->h1 i32->h1 i33->h1 i34->h1 i35->h1 i36->h1 i37->h1 i38->h1 i39->h1
-0.1285864 2.2160653 0.2990097 -5.1722264 -4.8375324 1.4537326 -1.6870400 -2.1019009 1.3542151 0.7036545
i40->h1 b->o h1->o
-2.1592154 10.7700684 -27.4712736

Resources