Non-linear regression vs log model - r

library(ggplot2)
# 90 paired observations. y is bounded (roughly 15-70) while x spans several
# orders of magnitude (~1.4e3 up to ~9e6), which is what motivates the
# log transformation attempted below.
dat <- structure(list(y = c(52L, 63L, 59L, 58L, 57L, 54L, 27L, 20L, 15L, 27L, 27L, 26L, 70L, 70L, 70L, 70L, 70L, 70L, 45L, 42L, 41L, 55L, 45L, 39L, 51L,
64L, 57L, 39L, 59L, 37L, 44L, 44L, 38L, 57L, 50L, 56L, 66L, 66L, 64L, 64L, 60L, 55L, 52L, 57L, 47L, 57L, 64L, 63L, 49L, 49L,
56L, 55L, 57L, 42L, 60L, 53L, 53L, 57L, 56L, 54L, 42L, 45L, 34L, 52L, 57L, 50L, 60L, 59L, 52L, 42L, 45L, 47L, 45L, 51L, 39L,
38L, 42L, 33L, 62L, 57L, 65L, 44L, 44L, 39L, 46L, 49L, 52L, 44L, 43L, 38L),
x = c(122743L, 132300L, 146144L, 179886L, 195180L, 233605L, 1400L, 1400L, 3600L, 5000L, 14900L, 16000L, 71410L, 85450L, 106018L,
119686L, 189746L, 243171L, 536545L, 719356L, 830031L, 564546L, 677540L, 761225L, 551561L, 626799L, 68618L, 1211267L, 1276369L,
1440113L, 1153720L, 1244575L, 1328641L, 610452L, 692624L, 791953L, 4762522L, 5011232L, 5240402L, 521339L,
560098L, 608641L, 4727833L, 4990042L, 5263899L, 1987296L, 2158704L, 2350927L, 7931905L, 8628608L, 8983683L, 2947957L, 3176995L, 3263118L,
55402L, 54854L, 55050L, 52500L, 72000L, 68862L, 1158244L, 1099976L, 1019490L, 538146L, 471219L, 437954L, 863592L, 661055L,
548097L, 484450L, 442643L, 404487L, 1033728L, 925514L, 854793L, 371420L, 285257L, 260157L, 2039241L, 2150710L, 1898614L,
1175287L, 1495433L, 1569586L, 2646966L, 3330486L, 3282677L, 745784L, 858574L, 1119671L)),
class = "data.frame", row.names = c(NA, -90L))
# Raw scatter plot of y against x.
ggplot(dat, aes(x = x, y = y)) + geom_point()
The relationship seems non-linear. Hence I fitted a model in which I log-transformed both y and x:
# Log-log linear model: equivalent to y = exp(a) * x^b on the original scale.
mod.lm <- lm(log(y) ~ log(x), data = dat)
# Scatter of the log-transformed data with the fitted line and its CI band.
ggplot(dat, aes(x = log(x), y = log(y))) + geom_point() + geom_smooth(method = "lm")
However, I can see that for lower values, the log transformation results in big differences, as shown by the residuals. I then moved to the non-linear least squares method. I have not used this before, but following this post:
Why is nls() giving me "singular gradient matrix at initial parameter estimates" errors?
# Rough guess for the lower asymptote c: half the smallest observed y,
# chosen so that y - c.0 stays strictly positive and can be logged.
c.0 <- min(dat$y) * 0.5
# Linearize y = a*exp(b*x) + c by regressing log(y - c.0) on x,
# giving starting values for a (exp of intercept) and b (slope).
model.0 <- lm(log(y - c.0) ~ x, data = dat)
start <- list(a = exp(coef(model.0)[1]), b = coef(model.0)[2], c = c.0)
# This call fails to converge from these starting values — see the
# "step factor ... reduced below 'minFactor'" error quoted below.
model <- nls(y ~ a * exp(b * x) + c, data = dat, start = start)
Error in nls(y ~ a * exp(b * x) + c, data = dat, start = start) :
step factor 0.000488281 reduced below 'minFactor' of 0.000976562
Can anyone advise me on what this error means and how to fit an nls model to the above data?

In your case nls gets into trouble because your starting values are not good and you introduced the coefficient c, which is not present in the linearized form.
To fit your nls you can do it the following way, with better starting values and after removing the coefficient c:
# Poisson GLM with a log link fits y ~ exp(a + b*x) directly; its
# coefficients make reliable starting values for the nls fit below.
# (Original wrote `dat = dat`, silently relying on R's partial argument
# matching to reach `data`; spelled out here.)
mod.glm <- glm(y ~ x, data = dat, family = poisson(link = "log"))
start <- list(a = coef(mod.glm)[1], b = coef(mod.glm)[2])
# Same exponential mean model, refit by non-linear least squares.
mod.nls <- nls(y ~ exp(a + b * x), data = dat, start = start)
I would recommend to use glm, as shown above, instead of nls to find the coefficients.
If the estimates of the linearized model (mod.lm) are to be free of bias, you need to adjust them.
# Refit the log-log model, then compare mean predictions on the response scale.
mod.lm <- lm(log(y) ~ log(x), data = dat)
mean(dat$y) #50.44444
mean(predict(mod.glm, type="response")) #50.44444
mean(predict(mod.nls)) #50.44499
# Back-transforming with exp(predict()) underestimates the mean (49.12 vs 50.44).
mean(exp(predict(mod.lm))) #49.11622 !
f <- log(mean(dat$y) / mean(exp(predict(mod.lm)))) # bias correction for the intercept a
# With the corrected intercept, the mean prediction matches the observed mean.
mean(exp(coef(mod.lm)[1] + f + coef(mod.lm)[2]*log(dat$x))) #50.44444
In case you want to obtain the coefficients given by James Phillips in the comments on your own, you can try:
mod.nlsJP <- nls(y ~ a * (x^(b*x)) + offset, data=dat, start=list(a=-30, b=-5e-6, offset=50))

Related

Plotting multiple different histograms based on vector of column names

I have the following dataframe that I want to plot a histogram for each column:
# dput output: 97 rows x 4 numeric columns (ACTB, ATP5F1, DDX5, EEF1G).
# NOTE(review): not assigned to anything here — presumably it should be
# `df_hk_genes <- structure(...)`, since later code refers to df_hk_genes.
structure(list(ACTB = c(11.7087918, 13.1847403, 8.767737, 12.2949669,
12.399929, 12.130683, 9.816222, 10.700336, 11.862543, 12.479818,
12.48152, 11.798277, 12.0932696, 11.014992, 12.3496682, 11.9810211,
11.946094, 12.1517049, 11.6794028, 12.4895911, 12.787039, 12.2927522,
12.746232, 12.4428358, 11.6382198, 11.6833202, 12.3320067, 12.390378,
12.5550587, 11.597384, 11.7608624, 12.018702, 11.9211984, 11.7143178,
11.800693, 12.7543979, 12.7028472, 11.6509804, 11.5112258, 12.36468,
12.0704304, 12.5876125, 12.2929857, 11.764464, 12.3740263, 12.275172,
11.5247418, 11.9290723, 11.100383, 12.5631062, 10.647334, 12.265323,
11.457643, 12.194339, 11.468173, 12.355388, 12.3233796, 12.200504,
11.716417, 12.430028, 11.3201558, 11.43911, 12.9782049, 11.139062,
11.181185, 10.123614, 11.963833, 10.919224, 11.873896, 11.800616,
12.2159602, 11.6360763, 11.6204291, 11.5500821, 12.6783682, 11.918854,
11.8701782, 10.98058, 11.6254916, 12.1558646, 11.533709, 12.0096358,
12.2830638, 11.772724, 11.8853726, 12.041823, 12.623814, 12.3134903,
11.6714245, 12.1333082, 12.4747336, 11.5326378, 12.6222532, 10.922728,
10.9492515, 11.3410073, 12.3005053), ATP5F1 = c(8.3731175, 8.3995189,
8.871088, 8.4389342, 8.529104, 9.004405, 8.883721, 8.70097, 8.24411,
8.393635, 8.76813, 8.756177, 8.4418168, 7.986864, 8.4840108,
8.6523954, 8.5645576, 8.2452877, 8.2440872, 8.7155973, 9.028364,
8.3578703, 9.007441, 7.8892308, 9.0255621, 8.3165712, 8.3400111,
8.061171, 8.5216917, 8.337517, 8.2341439, 8.810458, 8.8794988,
8.4657149, 8.311901, 8.131606, 8.5865282, 9.0900416, 8.8407707,
7.437107, 8.3982759, 8.7610335, 8.3624475, 8.353429, 8.3630127,
8.555639, 8.6435841, 8.9587154, 8.517079, 8.9597121, 8.111514,
8.99767, 8.266991, 8.106218, 8.518875, 8.445485, 8.6409752, 8.662025,
8.697312, 8.071819, 8.3113401, 8.709276, 8.9154896, 8.138148,
6.866765, 9.391611, 8.448086, 8.29189, 8.541953, 8.801044, 8.3088083,
8.288688, 8.8357729, 8.4731257, 8.7321095, 8.383259, 8.4729561,
5.551528, 8.526436, 8.4548827, 8.242625, 8.9862422, 8.5688994,
8.848029, 8.2656363, 8.434976, 8.8023704, 8.6692361, 8.4333198,
8.2926568, 8.2141276, 8.3246346, 7.7262395, 8.0797336, 8.7005427,
8.7695946, 8.1262312), DDX5 = c(11.3122241, 11.7042284, 8.866042,
12.0376754, 12.417701, 11.479431, 10.078783, 9.043405, 11.216074,
11.846906, 11.161803, 8.713301, 11.0790887, 11.685125, 11.9599302,
12.4036502, 11.9778411, 11.9900709, 11.6069971, 11.2651929, 11.455536,
12.3741866, 11.558182, 11.498146, 12.5073231, 11.4546523, 11.8465482,
11.51445, 11.721283, 12.340818, 11.5388553, 11.920725, 11.7067172,
11.6207138, 11.638226, 11.1407525, 11.5832407, 11.981909, 11.7684202,
12.435987, 11.5253382, 10.9882446, 12.1789747, 11.956257, 12.5427815,
12.007658, 11.6360041, 12.2520109, 11.858959, 12.4740761, 6.927855,
11.117424, 7.749824, 11.518817, 11.322855, 11.74096, 11.768474,
11.497009, 11.912888, 11.570506, 11.8167398, 11.912566, 11.2631437,
11.328946, 11.072161, 12.807216, 12.127281, 12.125497, 11.524622,
11.20101, 11.5451414, 12.0747211, 11.5716524, 11.7223929, 11.8529683,
11.868865, 11.8998228, 9.859857, 12.1404707, 11.9166386, 12.613162,
12.9062351, 11.6691732, 11.984726, 11.727059, 11.421816, 11.9506736,
12.2447547, 11.8167228, 11.9021356, 12.5527606, 12.6511506, 11.8550833,
11.382018, 11.8314198, 11.8394352, 11.8128198), EEF1G = c(12.622405,
11.2945857, 8.610078, 13.1323891, 12.702769, 12.319703, 10.181874,
8.615338, 11.526551, 12.106198, 11.602801, 9.137166, 13.0991666,
13.049641, 12.2938678, 11.7442632, 12.7866184, 12.6753617, 12.9552413,
12.0861518, 13.136434, 12.64865, 13.298616, 11.8531038, 12.7791485,
13.4150478, 11.636058, 12.013313, 11.8785493, 12.771945, 12.5351321,
13.147321, 11.6760014, 12.2604174, 11.802344, 12.23351, 12.1175728,
12.7360727, 12.5730595, 11.13, 11.7737462, 11.9774565, 11.8927844,
12.17392, 12.441605, 12.221691, 12.4866463, 12.5645763, 12.070268,
12.1801377, 8.80704, 12.288168, 8.298831, 12.234659, 11.832415,
12.474423, 12.4440819, 11.888544, 11.625162, 12.161204, 12.2707656,
12.941017, 12.3491325, 12.978561, 11.833124, 11.782119, 12.273029,
12.462202, 12.538127, 12.236135, 12.2884941, 12.4195123, 12.5274317,
12.3917089, 11.912339, 12.439751, 12.0962051, 10.912737, 11.999598,
12.3776528, 11.348448, 12.4151316, 11.5389366, 11.328957, 12.4397802,
12.238454, 12.0192408, 12.2290439, 12.8381542, 11.1834666, 12.0636739,
12.4752125, 12.7681644, 12.1747129, 12.7343662, 12.3493937, 11.7971488
)), class = "data.frame", row.names = c(1L, 2L, 3L, 4L, 5L, 6L,
7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, 19L,
20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L, 31L, 32L,
33L, 34L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L,
46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L,
59L, 60L, 61L, 62L, 63L, 64L, 66L, 67L, 68L, 69L, 70L, 71L, 72L,
73L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L, 86L,
87L, 88L, 89L, 90L, 91L, 92L, 93L, 97L, 98L, 99L, 100L, 102L,
103L))
I want to create a grid of histograms for each column, the list of column is:
# Housekeeping-gene column names to plot, one histogram facet each.
# (Assignment switched from `=` to the idiomatic `<-`.)
HK_GENES <- c(
  "ACTB", "ATP5F1", "DDX5", "EEF1G"
)
Is there a way of doing it with ggplot2?
I tried with no success the following:
# NOTE(review): the asker's failing attempt, kept verbatim. aes_string() is
# deprecated and is handed a length-4 vector where a single column name is
# expected, and the data is still in wide format, so faceting over HK_GENES
# cannot split rows by gene — presumably the intended fix is to reshape to
# long format first (see the answer below).
ggplot(data=df_hk_genes, aes_string(x=HK_GENES)) +
geom_histogram(bins=15) +
facet_wrap(HK_GENES, nrow = 5, scale = "free_x")
In Python I could create a subfigure for each histogram and iterate over them.
I have around 20 columns in my original dataframe, and I want to avoid calling the same block of code with a different column name each time.
You can reshape the data and facet over the groups.
library(reshape2)
library(dplyr)
# Reshape wide -> long (one row per gene/value pair), then draw one
# free-x-scale histogram facet per gene.
# (Original wrote `scale = "free_x"`, silently relying on partial argument
# matching for facet_wrap's `scales` parameter; spelled out here.)
melt(df_hk_genes) %>%
  ggplot(aes(x = value)) +
  facet_wrap(~ variable, nrow = 5, scales = "free_x") +
  geom_histogram(bins = 15)

Cluster sequences in a network by their editing distance - in R

I have a dataframe my_df with 10,000 different sequences of varying lengths (between 13 and 18); they are composed of the digits 0-3.
example of my data (60 lines) :
library(stringdist)
library(igraph)
library(reshape2)
# 60-row sample: a single character column `alfa_ch` holding the sequences.
# NOTE(review): the dput output is not assigned to anything here — presumably
# it should be `my_df <- structure(...)`, since the code below uses my_df.
structure(list(alfa_ch = c("2000000232003211","2000000331021", "20000003310320011", "20000003323331021",
"20000003331001","20000003331001", "20000003332021", "200000100331021",
"20000013011001","20000013301021", "2000001333331011", "20000023231031",
"200000233302001","20000023331011", "20000023331012", "20000023332021",
"200000233331021","20000030231011", "200000303323331021", "200000313301021",
"20000032031021","2000003220021", "2000003221011", "2000003231031",
"20000032311001","200000330330021", "2000003311211", "2000003331001",
"2000003331001","2000003331012", "20000033321012", "200000333231011",
"20000033323331021","20000033331021", "2000010320011", "20000103323331021",
"200001113011001","20000113011001", "20000120330021", "20000123033011",
"2000012331131","2000013011001", "2000013301021", "200001330231011",
"2000013323001","20000133231311", "20000133301001", "200001333331011",
"200001333331011","200001333331011", "200001333331011", "20000200331021",
"20000200331021","20000200331131", "20000203221011", "2000020333133011",
"20000212221111","20000213301021", "2000021331011", "200002223231011")),
row.names = c(1L,3L, 5L, 6L, 7L, 8L, 9L, 10L, 12L, 13L, 14L, 16L, 17L, 18L, 19L,20L, 21L,
23L, 24L, 27L, 29L, 31L, 32L, 33L, 34L, 35L, 38L, 41L,42L, 43L, 46L, 47L, 48L,
49L, 58L, 59L, 60L, 62L, 63L, 64L, 66L,68L, 71L, 72L, 73L, 74L, 75L, 77L, 78L,
79L, 80L, 81L, 82L, 83L,84L, 85L, 89L, 90L, 91L, 95L), class = "data.frame")
My goal is to cluster them by editing distance &lt; 3.
# Pairwise Levenshtein ("lv") distances between all sequences.
# NOTE(review): `my_df$alfa` appears to rely on `$` partial matching to reach
# the `alfa_ch` column — confirm and spell out the full column name.
dist_mtx=as.matrix(stringdistmatrix(my_df$alfa,my_df$alfa,method = "lv"))
# Blank out pairs whose distance exceeds the threshold.
dist_mtx[dist_mtx>3]=NA
# NOTE(review): `new_test_2` is not defined anywhere in this snippet; this
# line presumably meant to blank the zero self-distances, i.e.
# dist_mtx[dist_mtx == 0] = NA — confirm.
dist_mtx[new_test_2==0]=NA
# NOTE(review): `$` is not valid on a matrix, so these two lines would error;
# the names should likely come from the data frame column (my_df$alfa_ch).
colnames(dist_mtx) <- dist_mtx$alfa
rownames(dist_mtx) <- dist_mtx$alfa
Then I created an edge list, where the value represents the editing distance between any 2 sequences:
# Melt the matrix into (seq1, seq2, value) rows, keeping each unique pair,
# then drop any remaining NA distances.
# (Original used the reassignable shorthand `T` and `=` assignment;
# replaced with TRUE and `<-`.)
edge_list <- unique(melt(dist_mtx, na.rm = TRUE, varnames = c('seq1', 'seq2'), as.is = TRUE))
edge_list <- edge_list[!is.na(edge_list$value), ]
Then I created the igraph object:
igraph_obj <- igraph::graph_from_data_frame(edge_list,directed = F,vertices = dist_mtx$alfa)
Then I tried numerous methods to cluster those sequences with the Louvain method, and I am still getting clusters whose members have editing distance &gt; 3. I am aware that it might be because of the connected components.
so my questions are :
Is there a way to cluster the sequences together so that in each cluster the members have editing distance &lt; 3?
Is there a way to recognize the cluster centers (hubs) — I tried hubness.score() — and assign vertices according to those centers, taking the editing distance into consideration?
This is my first post;
I will appreciate any help.

Is there a way to plot confidence intervals for an orthogonal/ TLS regression model?

I've found the functions onls::onls() and pracma::odregress() that calculate orthogonal regression models. I would like to plot such models in the same style as geom_smooth(), that is, the regression line surrounded by a 95% confidence interval.
Example:
# 70 paired (y, x) observations used to demonstrate the orthogonal fit.
example <- structure(list(y = c(-28.9143374863044, -28.5783512160246, -29.1751498307569,
-28.5613677412358, -29.2441600709021, -29.1848482932202, -29.469712350617,
-29.1212786695474, -29.3338385227209, -29.0582324840251, -29.1159002526588,
-29.1384485361936, -29.4743426548081, -29.242305699462, -29.5517891592378,
-29.1701701877517, -29.2337122509592, -29.150317639976, -29.139526754614,
-29.05974643127, -29.0540797909476, -29.0859798970361, -29.27517072563,
-29.1907525452561, -30.0965246973573, -28.9734662257987, -29.6953578711591,
-28.2014460687026, -30.0621997994278, -27.9399550295493, -29.8886842413551,
-29.6609659140518, -29.6920474706673, -30.2418230320867, -29.8334571372628,
-29.8626462112615, -29.9051818751105, -29.6518825347484, -29.5380886463871,
-29.7500527026688, -29.6095990506199, -29.6049957701729, -29.5368579894466,
-29.5861340837645, -29.5737037489314, -29.5773848425703, -28.0265409956043,
-28.0899954900073, -28.265152586989, -28.0062832808179, -27.7205565228848,
-27.4041257575861, -28.1113851658386, -26.914663492446, -27.877772497213,
-27.0684956870887, -27.9276723508022, -27.7588907638397, -27.3710663654935,
-27.3623535825255, -27.7783142763593, -28.5132310123219, -28.5193067297636,
-28.5283974320574, -28.6153706663899, -28.6816032262091, -29.1043640141426,
-28.44589108955, -28.6614098552091, -28.7403207700811), x = c(33.1158714294,
18.6527993810972, 17.0276514703819, 22.3627925702962, 18.170924813473,
32.0677953809724, 46.5216445923, 34.9911138888596, 25.0910229505442,
13.9473438263, 17.381641499988, 17.014380035215, 40.9107205320526,
52.2695803285185, 58.9499627404227, 40.5894751586832, 23.496896254444,
33.6412616569372, 14.7548102820616, 46.3057677573658, 14.280050708175,
31.2877073530984, 18.8534870545271, 16.5168182808868, 63.9908365598676,
33.7277991683148, 35.4163778417314, 32.1050571361531, 51.3240160147292,
24.4237814340378, 39.3334452128324, 53.8079129732769, 43.26844558712,
58.3003234863, 43.934151887875, 76.8046441618721, 64.8779439305438,
46.8684772359235, 66.4989547729, 41.9780584414396, 50.2248225396345,
58.8492643072032, 64.5647735596, 48.3225469025232, 60.4074024077677,
57.3789336302925, 11.2785320282, 11.3491302769043, 7.59091310831495,
18.4789668943737, 5.84773873549871, 10.6156844347299, 15.7432512138035,
11.4885938379565, 7.74754936760848, 12.1071624756, 14.9075944237136,
20.9201573163328, 30.2789412366595, 33.8582180028129, 15.4269225956373,
8.53801707561128, 10.1814249853966, 7.33018941782735, 8.42749268077253,
9.74786459733547, 10.5363144200841, 10.7873065304121, 16.7602893825786,
12.7551904319156)), class = "data.frame", row.names = c(1L, 2L,
3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 16L,
17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L,
30L, 31L, 32L, 33L, 34L, 35L, 36L, 39L, 40L, 41L, 42L, 43L, 44L,
45L, 46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L,
58L, 59L, 60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L,
71L, 72L))
# Orthogonal (total least squares) straight-line fit; requires onls::onls().
mod <- onls(y ~ a*x+b, data = example, start = list(a = 0.03, b = -28))
# Fine grid over the observed x range for drawing the fitted line.
newData <- data.frame(x = seq(min(example$x), max(example$x), 0.1))
newData$y <- predict(mod, newdata = newData)
plot(y ~ x, data = newData, type = "l", col = "red")
points(y ~ x, data = example)
# for a regular lm() model the subsequent steps would be
# NOTE(review): predict() for an onls model does not appear to honor
# interval = "confidence" the way predict.lm does, so `conf` will not hold
# usable bounds — this is exactly the problem the question describes.
conf <- predict(mod, newdata = newData, interval = "confidence", level = 0.95)
lines(newData$x, conf[,2])
lines(newData$x, conf[,3])
However, the last steps won't give any useful result when applied to the onls model. Are there any methods to calculate or estimate those confidence intervals?
Edit: As DanY mentioned, the onls package contains the confint.onls() function. This will give me the upper and lower estimate for each regression parameter at a given confidence level:
confint(mod, level = 0.95)
I could do something like
# Lower/upper bounds for each regression parameter, extracted separately.
conf_a <- confint(mod, param = "a", level = 0.95)
conf_b <- confint(mod, param = "b", level = 0.95)
and calculate the extrema for each x
# Evaluation grid over the observed x range.
x <- seq(min(example$x), max(example$x), 0.1)
# All four corner combinations of the slope/intercept confidence bounds,
# evaluated at every grid point (one column per combination).
test <- cbind(
  conf_a[1] * x + conf_b[1],
  conf_a[1] * x + conf_b[2],
  conf_a[2] * x + conf_b[1],
  conf_a[2] * x + conf_b[2]
)
# Row-wise maximum over the four candidate lines. Replaces the original
# index loop, which grew `maxima` element-by-element and used the unsafe
# 1:length(x) idiom; apply() returns the same numeric vector.
maxima <- apply(test, 1, max)
but this doesn't quite look like what I'd expect, and I'm not really convinced this is the correct approach.

How could I insert a histogram into a `geom_smooth` plot?

I am trying to mimic some figures from journal papers. Here is an example from Schlenker and Roberts (2009).
I'd like to add a similar histogram to my own plot. Please see below. Is it possible to achieve this task with ggplot? Thanks.
See a dput data below. rh represents x axis and yhat1 indicates the y axis.
> dput(df.m[,c('rh','yhat1')])
# dput output for df.m[, c('rh', 'yhat1')]: 88 rows; rh is the x axis
# (relative humidity, integer percent) and yhat1 the y axis.
structure(list(rh = c(11L, 13L, 15L, 16L, 17L, 18L, 19L, 20L,
21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L, 31L, 32L, 33L,
34L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L, 46L,
47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L, 59L,
60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L, 71L, 72L,
73L, 74L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L,
86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 97L, 98L,
99L, 100L), yhat1 = c(0.0097784, 0.111762325, 0.0887123966666667,
0.24714677, 0.079887235, 0.162714825, 0.24789043, 0.107558165,
0.182885584545455, 0.136690964444444, 0.159203683333333, 0.5156053805,
0.587034213636364, 0.233377613, 0.31531245, 0.4778449572, 0.212574774137931,
0.2274105676, 0.253733041707317, 0.560999839354839, 0.224892959444444,
0.392268151304348, 0.351498776603774, 0.366547010727273, 0.35013903469697,
0.382026272372881, 0.510611202461538, 0.391176294871795, 0.423356474328358,
0.380316089137931, 0.459821489651163, 0.388949226593407, 0.506833284166667,
0.459263999259259, 0.558535709906542, 0.745323656071429, 0.60167464606383,
0.72210854266129, 0.695203745656566, 0.638265557105263, 0.52373110503876,
0.611695133046875, 0.963833986386555, 0.803060819275362, 0.837984669112426,
0.7931166204, 0.870764136976744, 1.21005393820225, 0.862845527777778,
1.028402381125, 1.2077895633526, 1.01176334204082, 1.08139833964706,
0.90346288, 1.05871937863014, 1.27788244930233, 1.16250975336634,
1.1450916525, 1.4412301412, 1.21264826238281, 1.35417930411504,
1.18588206727273, 1.40277204710084, 1.33194569259259, 1.18413544210084,
1.22718163528571, 1.33992107226667, 1.44770425268156, 1.43974964777778,
1.26656031551351, 1.58998655363636, 1.29994566024272, 1.46398530493902,
1.26061274530055, 1.30718501225275, 1.20523443567901, 1.23789593428571,
1.34433582230769, 1.36438752851852, 1.5915544857037, 1.10979387898438,
1.31898147708661, 1.426120105, 1.52075980155738, 1.40629729460177,
0.9048366681, 1.2973945580531, 1.37696154192982)), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -88L))
Hopefully this can get you started:
library(ggplot2)
# Number of histogram bins; kept in one variable so it is used consistently
# below (the original hard-coded 20 again inside cut()).
breaks <- 20
# Tallest bin count (+1 headroom): the smooth curve is shifted up by this
# amount so it sits above the histogram on the shared y axis.
maxcount <- max(table(cut(df.m$rh, breaks = breaks))) + 1
ggplot(data = df.m, aes(x = rh)) +
  # Loess smooth of yhat1, scaled x10 and shifted above the histogram.
  stat_smooth(formula = y ~ x, aes(y = yhat1 * 10 + maxcount), method = "loess") +
  # Secondary axis undoes the shift/scale so the curve reads in data units.
  scale_y_continuous(breaks = c(0, 5), "Exposure (Days)",
                     sec.axis = sec_axis(~ (. - maxcount) / 10,
                                         "Log of Daily Confirmed Case Counts")) +
  geom_histogram(bins = breaks, color = "black", fill = "green") +
  # Vertical reference line at rh = 85, drawn only in the curve region.
  geom_segment(aes(x = 85, xend = 85, y = 0 + maxcount, yend = Inf),
               col = "red", linetype = "dashed") +
  labs(x = "Relative Humidity Percentage") + theme_classic() +
  theme(axis.line.y.left = element_line(color = "green"),
        axis.title.y.left = element_text(hjust = 0.05, color = "green"))

Legends for two different sized series in ggplot2#R

Is there a way to get legends for two series when plotted using ggplot in R?
May be I am missing some silly (should have known) argument in the function. I did not find the answer on the internet.
Here is the data:
# First series: 40 observations at 30-second intervals (POSIXct time, VE).
df1 <- structure(list(time = structure(c(1352804400, 1352804430, 1352804460,
1352804490, 1352804520, 1352804550, 1352804580, 1352804610, 1352804640,
1352804670, 1352804700, 1352804730, 1352804760, 1352804790, 1352804820,
1352804850, 1352804880, 1352804910, 1352804940, 1352804970, 1352805000,
1352805030, 1352805060, 1352805090, 1352805120, 1352805150, 1352805180,
1352805210, 1352805240, 1352805270, 1352805300, 1352805330, 1352805360,
1352805390, 1352805420, 1352805450, 1352805480, 1352805510, 1352805540,
1352805570), class = c("POSIXct", "POSIXt"), tzone = ""), VE = c(36L,
31L, 32L, 55L, 39L, 45L, 46L, 60L, 56L, 53L, 58L, 60L, 30L, 38L,
55L, 40L, 47L, 52L, 33L, 34L, 58L, 38L, 39L, 33L, 39L, 50L, 38L,
32L, 32L, 41L, 44L, 35L, 48L, 51L, 59L, 35L, 51L, 56L, 39L, 35L
)), .Names = c("time", "VE"), row.names = c(NA, -40L), class = "data.frame")
# Second series: 30 observations over the same time base (shorter than df1).
df2 <- structure(list(time = structure(c(1352804400, 1352804430, 1352804460,
1352804490, 1352804520, 1352804550, 1352804580, 1352804610, 1352804640,
1352804670, 1352804700, 1352804730, 1352804760, 1352804790, 1352804820,
1352804850, 1352804880, 1352804910, 1352804940, 1352804970, 1352805000,
1352805030, 1352805060, 1352805090, 1352805120, 1352805150, 1352805180,
1352805210, 1352805240, 1352805270), class = c("POSIXct", "POSIXt"
), tzone = ""), VE = c(47L, 45L, 45L, 40L, 42L, 40L, 48L, 48L,
43L, 44L, 44L, 46L, 42L, 49L, 41L, 48L, 47L, 44L, 44L, 48L, 47L,
42L, 42L, 40L, 47L, 46L, 50L, 49L, 46L, 49L)), .Names = c("time",
"VE"), row.names = c(NA, -30L), class = "data.frame")
Here is the code:
ggplot(df1,aes(x=time, y=VE))+geom_line(color='red',size=1)+geom_line(data=df2,aes(x=time, y=VE),colour="blue",size=2)
Specifically, implementing #baptiste's comment:
# Stack the two series into one data frame, tagging each with a factor s,
# then map s to both colour and size so ggplot builds the legend itself.
dff <- rbind(
  data.frame(s = factor(1), df1),
  data.frame(s = factor(2), df2)
)
ggplot(dff, aes(x = time, y = VE, colour = s, size = s)) +
  geom_line() +
  scale_colour_manual(values = c("red", "blue")) +
  scale_size_manual(values = 1:2)
Using #Ben's answer and comments, removing the extra-legend, and then renaming the legend title I wrote:
# Combine both series with a grouping factor s, map it to colour only,
# and customise the single legend's labels and title.
dff <- rbind(data.frame(s = factor(1), df1), data.frame(s = factor(2), df2))
# NOTE: the original chained a stray scale_size_manual(values = c("red",
# "blue")) here — a copy-paste remnant: no size aesthetic is mapped, so the
# scale had no effect, and colour strings are not valid size values anyway.
# It has been removed; the rendered plot is unchanged.
ggplot(dff, aes(x = time, y = VE, colour = s)) +
  geom_line() +
  scale_colour_manual(values = c("red", "blue"),
                      labels = c('My label-1', 'My label-2'),
                      name = 'Legend Title') +
  theme(axis.title.x = element_text(face = "bold", size = 16),
        axis.title.y = element_text(face = "bold", size = 16),
        axis.text.x = element_text(angle = 0, vjust = 0.5, size = 14),
        axis.text.y = element_text(angle = 0, vjust = 0.5, size = 14))
and got

Resources