I am trying to plot geographical locations on a map, using ggplot2 in combination with the maps package.
Everything works nicely except for one region that is very crowded. I would like to zoom in on this region, the Galapagos Islands, but I have not been able to.
Here is the script I have used:
library(ggplot2)
library(maps)
measurements <- read.csv("all_podo.count.csv", header=T)
allworld <- map_data("world")
d <- ggplot(measurements, aes(long, lat)) +
  geom_polygon(data = allworld, aes(x = long, y = lat, group = group),
               colour = "grey70", fill = "grey70") +
  geom_point(size = 0.7, shape = 8) +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_blank()) +
  geom_text(aes(label = name), size = 1, vjust = 0, hjust = 0)
d
The measurements look like this:
structure(list(site_num = c(59L, 54L, 44L, 42L, 38L, 37L, 43L,
39L, 36L, 40L, 34L, 35L, 33L, 41L, 32L, 31L, 30L, 29L, 28L, 27L,
26L, 25L, 24L, 23L, 22L, 21L, 20L, 19L, 18L, 17L, 16L, 15L, 14L,
13L, 12L, 11L, 10L, 7L, 8L, 9L, 1L, 3L, 6L, 5L, 79L, 77L, 78L,
76L, 75L, 74L, 80L, 81L, 72L, 73L, 71L, 70L, 69L, 68L, 66L, 67L,
65L, 63L, 64L, 62L, 60L, 61L), name = structure(c(44L, 43L, 42L,
40L, 36L, 35L, 41L, 37L, 34L, 38L, 32L, 33L, 31L, 39L, 30L, 29L,
28L, 27L, 26L, 25L, 24L, 23L, 22L, 21L, 20L, 19L, 18L, 17L, 16L,
15L, 14L, 13L, 12L, 11L, 10L, 9L, 8L, 5L, 6L, 7L, 1L, 2L, 4L,
3L, 64L, 62L, 63L, 61L, 60L, 59L, 65L, 66L, 57L, 58L, 56L, 55L,
54L, 53L, 51L, 52L, 50L, 48L, 49L, 47L, 45L, 46L), .Label = c("GS000a",
"GS000b", "GS000c", "GS000d", "GS001a", "GS001b", "GS001c", "GS002",
"GS003", "GS004", "GS005", "GS006", "GS007", "GS008", "GS009",
"GS010", "GS011", "GS012", "GS013", "GS014", "GS015", "GS016",
"GS017", "GS018", "GS019", "GS020", "GS021", "GS022", "GS023",
"GS025", "GS026", "GS027", "GS028", "GS029", "GS030", "GS031",
"GS032", "GS033", "GS034", "GS035", "GS036", "GS037", "GS047",
"GS051", "GS108a", "GS108b", "GS109", "GS110a", "GS110b", "GS111",
"GS112a", "GS112b", "GS113", "GS114", "GS115", "GS116", "GS117a",
"GS117b", "GS119", "GS120", "GS121", "GS122a", "GS122b", "GS123",
"GS148", "GS149"), class = "factor"), lat = c(-15.143611, -10.131389,
-1.9738889, 1.3891667, -0.3011111, 0.27222222, -0.020833334,
-0.5938889, -0.2, -1.2283334, -1.2161111, -1.2169445, 1.2641667,
-0.38305557, 5.552778, 5.64, 6.492778, 8.129167, 9.164444, 10.716389,
18.036667, 20.5225, 24.174723, 24.488333, 32.506943, 36.003887,
38.946945, 39.417778, 38.94, 41.09111, 41.485832, 43.63222, 45.111668,
44.690277, 44.137222, 42.85278, 42.503056, 32.166668, 32.166668,
32.166668, 31.175, 31.175, 31.175, 32.174835, -32.399166, -30.898333,
-30.898333, -29.348888, -26.035, -23.21611, -6.3166666, -6.3166666,
-4.613611, -4.613611, -4.635, -4.6625, -4.990278, -7.0075, -8.505,
-8.505, -9.596945, -10.446111, -10.446111, -10.943611, -12.0925,
-12.0925), long = c(-147.435, -135.44945, -95.014725, -91.81695,
-91.651665, -91.63333, -91.19778, -91.06944, -90.83528, -90.42917,
-90.422775, -90.319725, -90.295, -90.279724, -87.087776, -86.56528,
-82.90389, -79.69111, -79.83611, -80.25445, -83.78472, -85.41361,
-84.344444, -83.07, -79.263885, -75.39472, -76.41722, -75.504166,
-74.685, -71.60222, -71.35111, -66.84722, -64.94666, -63.637222,
-63.644444, -66.217224, -67.24, -64.5, -64.5, -64.5, -64.32433,
-64.32433, -64.32433, -64.01017, 36.591946, 40.420277, 40.420277,
43.215557, 50.123055, 52.30611, 39.009167, 39.009167, 55.50861,
55.50861, 56.836113, 60.523056, 64.97667, 76.33139, 80.37556,
80.37556, 84.1975, 88.30278, 88.30278, 92.05889, 96.88167, 96.88167
)), .Names = c("site_num", "name", "lat", "long"), class = "data.frame", row.names = c(NA,
-66L))
Can you help me?
Thank you,
Simon
This shows that the world map in the maps package is not particularly high resolution by the time you get down to the level of the Galapagos:
if (require("maps")) {
world <- map_data("world")
mid_range <- function(x) mean(range(x))
library(plyr)
ggplot(world, aes(long, lat)) +
geom_polygon( aes(group = group), fill = "green", colour = "red")+
ylim(c(-2, 1 )) + xlim( c(-93,-88) ) }
There doesn't seem to be a region with that name in the map data, so I'm thinking you might want to find a better shapefile for this project:
grep("Is", unique(world$region), value=TRUE)
1 "Israel"
[2] "Marshall Islands"
[3] "Solomon Islands"
[4] "Cook Islands"
[5] "South Sandwich Islands"
[6] "Sonsorol Island"
[7] "Maug Island"
[8] "Pitcairn Islands"
[9] "Isle of Man"
[10] "Andaman Islands"
[11] "Northern Mariana Islands"
[12] "Madeira Islands"
[13] "Sin Cowe Island"
[14] "Paracel Islands"
[15] "Falkland Islands"
[16] "Cayman Islands"
[17] "Virgin Islands"
[18] "Canary Islands"
[19] "Spratly Island"
[20] "Isle of Wight"
An RSeek search finds several links, one of them to:
http://downloads.cloudmade.com/americas/south_america/ecuador/galapagos/galapagos.shapefiles.zip
"All of the files at downloads.cloudmade.com are based on data from OpenStreetMap and are licensed under the terms of the Creative Commons Attribution Share-Alike 2.0 license. If you use these files please make sure you attribute the OpenStreetMap community by including a link to www.openstreetmap.org."
Try running the following after your code above:
library(grid)
# Set up a small viewport in the lower-left corner of the device
pushViewport(
  viewport(x = unit(0.1, 'npc'), y = unit(0.1, 'npc'),
           width = unit(0.2, 'npc'), height = unit(0.2, 'npc'))
)
# Same layers as before, cropped to the Galapagos region
d2 <- ggplot(measurements, aes(long, lat)) +
  geom_polygon(data = allworld, aes(x = long, y = lat, group = group),
               colour = "grey70", fill = "grey70") +
  geom_point(size = 0.7, shape = 8) +
  ylim(c(-2, 1)) + xlim(c(-93, -88))
print(d2, newpage = FALSE)
This should give a general idea of adding a sub map, but you will want to adjust the above to get a better zoom and better looking results.
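Another option, if you prefer to stay inside ggplot2, is to embed the zoomed panel as an inset with annotation_custom(). This is only a sketch: the inset placement coordinates below are arbitrary placeholders, and coord_cartesian() is used for the crop so that no points get dropped.
library(ggplot2)
# Zoomed panel: same layers as the main plot, cropped to the Galapagos region
d2 <- ggplot(measurements, aes(long, lat)) +
  geom_polygon(data = allworld, aes(x = long, y = lat, group = group),
               colour = "grey70", fill = "grey70") +
  geom_point(size = 0.7, shape = 8) +
  coord_cartesian(xlim = c(-93, -88), ylim = c(-2, 1))
# Embed the zoomed panel as an inset; xmin/xmax/ymin/ymax are in the data
# coordinates of the main plot and will need tuning
d + annotation_custom(ggplotGrob(d2),
                      xmin = -180, xmax = -100, ymin = -80, ymax = -20)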
I have the following data frame, for which I want to plot a histogram of each column:
structure(list(ACTB = c(11.7087918, 13.1847403, 8.767737, 12.2949669,
12.399929, 12.130683, 9.816222, 10.700336, 11.862543, 12.479818,
12.48152, 11.798277, 12.0932696, 11.014992, 12.3496682, 11.9810211,
11.946094, 12.1517049, 11.6794028, 12.4895911, 12.787039, 12.2927522,
12.746232, 12.4428358, 11.6382198, 11.6833202, 12.3320067, 12.390378,
12.5550587, 11.597384, 11.7608624, 12.018702, 11.9211984, 11.7143178,
11.800693, 12.7543979, 12.7028472, 11.6509804, 11.5112258, 12.36468,
12.0704304, 12.5876125, 12.2929857, 11.764464, 12.3740263, 12.275172,
11.5247418, 11.9290723, 11.100383, 12.5631062, 10.647334, 12.265323,
11.457643, 12.194339, 11.468173, 12.355388, 12.3233796, 12.200504,
11.716417, 12.430028, 11.3201558, 11.43911, 12.9782049, 11.139062,
11.181185, 10.123614, 11.963833, 10.919224, 11.873896, 11.800616,
12.2159602, 11.6360763, 11.6204291, 11.5500821, 12.6783682, 11.918854,
11.8701782, 10.98058, 11.6254916, 12.1558646, 11.533709, 12.0096358,
12.2830638, 11.772724, 11.8853726, 12.041823, 12.623814, 12.3134903,
11.6714245, 12.1333082, 12.4747336, 11.5326378, 12.6222532, 10.922728,
10.9492515, 11.3410073, 12.3005053), ATP5F1 = c(8.3731175, 8.3995189,
8.871088, 8.4389342, 8.529104, 9.004405, 8.883721, 8.70097, 8.24411,
8.393635, 8.76813, 8.756177, 8.4418168, 7.986864, 8.4840108,
8.6523954, 8.5645576, 8.2452877, 8.2440872, 8.7155973, 9.028364,
8.3578703, 9.007441, 7.8892308, 9.0255621, 8.3165712, 8.3400111,
8.061171, 8.5216917, 8.337517, 8.2341439, 8.810458, 8.8794988,
8.4657149, 8.311901, 8.131606, 8.5865282, 9.0900416, 8.8407707,
7.437107, 8.3982759, 8.7610335, 8.3624475, 8.353429, 8.3630127,
8.555639, 8.6435841, 8.9587154, 8.517079, 8.9597121, 8.111514,
8.99767, 8.266991, 8.106218, 8.518875, 8.445485, 8.6409752, 8.662025,
8.697312, 8.071819, 8.3113401, 8.709276, 8.9154896, 8.138148,
6.866765, 9.391611, 8.448086, 8.29189, 8.541953, 8.801044, 8.3088083,
8.288688, 8.8357729, 8.4731257, 8.7321095, 8.383259, 8.4729561,
5.551528, 8.526436, 8.4548827, 8.242625, 8.9862422, 8.5688994,
8.848029, 8.2656363, 8.434976, 8.8023704, 8.6692361, 8.4333198,
8.2926568, 8.2141276, 8.3246346, 7.7262395, 8.0797336, 8.7005427,
8.7695946, 8.1262312), DDX5 = c(11.3122241, 11.7042284, 8.866042,
12.0376754, 12.417701, 11.479431, 10.078783, 9.043405, 11.216074,
11.846906, 11.161803, 8.713301, 11.0790887, 11.685125, 11.9599302,
12.4036502, 11.9778411, 11.9900709, 11.6069971, 11.2651929, 11.455536,
12.3741866, 11.558182, 11.498146, 12.5073231, 11.4546523, 11.8465482,
11.51445, 11.721283, 12.340818, 11.5388553, 11.920725, 11.7067172,
11.6207138, 11.638226, 11.1407525, 11.5832407, 11.981909, 11.7684202,
12.435987, 11.5253382, 10.9882446, 12.1789747, 11.956257, 12.5427815,
12.007658, 11.6360041, 12.2520109, 11.858959, 12.4740761, 6.927855,
11.117424, 7.749824, 11.518817, 11.322855, 11.74096, 11.768474,
11.497009, 11.912888, 11.570506, 11.8167398, 11.912566, 11.2631437,
11.328946, 11.072161, 12.807216, 12.127281, 12.125497, 11.524622,
11.20101, 11.5451414, 12.0747211, 11.5716524, 11.7223929, 11.8529683,
11.868865, 11.8998228, 9.859857, 12.1404707, 11.9166386, 12.613162,
12.9062351, 11.6691732, 11.984726, 11.727059, 11.421816, 11.9506736,
12.2447547, 11.8167228, 11.9021356, 12.5527606, 12.6511506, 11.8550833,
11.382018, 11.8314198, 11.8394352, 11.8128198), EEF1G = c(12.622405,
11.2945857, 8.610078, 13.1323891, 12.702769, 12.319703, 10.181874,
8.615338, 11.526551, 12.106198, 11.602801, 9.137166, 13.0991666,
13.049641, 12.2938678, 11.7442632, 12.7866184, 12.6753617, 12.9552413,
12.0861518, 13.136434, 12.64865, 13.298616, 11.8531038, 12.7791485,
13.4150478, 11.636058, 12.013313, 11.8785493, 12.771945, 12.5351321,
13.147321, 11.6760014, 12.2604174, 11.802344, 12.23351, 12.1175728,
12.7360727, 12.5730595, 11.13, 11.7737462, 11.9774565, 11.8927844,
12.17392, 12.441605, 12.221691, 12.4866463, 12.5645763, 12.070268,
12.1801377, 8.80704, 12.288168, 8.298831, 12.234659, 11.832415,
12.474423, 12.4440819, 11.888544, 11.625162, 12.161204, 12.2707656,
12.941017, 12.3491325, 12.978561, 11.833124, 11.782119, 12.273029,
12.462202, 12.538127, 12.236135, 12.2884941, 12.4195123, 12.5274317,
12.3917089, 11.912339, 12.439751, 12.0962051, 10.912737, 11.999598,
12.3776528, 11.348448, 12.4151316, 11.5389366, 11.328957, 12.4397802,
12.238454, 12.0192408, 12.2290439, 12.8381542, 11.1834666, 12.0636739,
12.4752125, 12.7681644, 12.1747129, 12.7343662, 12.3493937, 11.7971488
)), class = "data.frame", row.names = c(1L, 2L, 3L, 4L, 5L, 6L,
7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, 19L,
20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L, 31L, 32L,
33L, 34L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L,
46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L,
59L, 60L, 61L, 62L, 63L, 64L, 66L, 67L, 68L, 69L, 70L, 71L, 72L,
73L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L, 86L,
87L, 88L, 89L, 90L, 91L, 92L, 93L, 97L, 98L, 99L, 100L, 102L,
103L))
I want to create a grid of histograms, one for each column; the list of columns is:
HK_GENES = c(
"ACTB", "ATP5F1", "DDX5", "EEF1G"
)
Is there a way of doing it with ggplot2?
I tried the following, without success:
ggplot(data=df_hk_genes, aes_string(x=HK_GENES)) +
geom_histogram(bins=15) +
facet_wrap(HK_GENES, nrow = 5, scale = "free_x")
In Python I could create a subfigure for each histogram and iterate over them.
I have around 20 columns in my original data frame, and I want to avoid calling the same plotting block once per column.
You can reshape the data and facet over the groups.
library(reshape2)
library(dplyr)
melt(df_hk_genes) %>%
  ggplot(aes(x = value)) +
  facet_wrap(~ variable, nrow = 5, scales = "free_x") +
  geom_histogram(bins = 15)
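If you prefer tidyr over reshape2, here is a minimal equivalent sketch with pivot_longer(), assuming the data frame is named df_hk_genes and HK_GENES is the column list defined above:
library(tidyr)
library(dplyr)
library(ggplot2)
df_hk_genes %>%
  # Stack the selected columns into gene/value pairs
  pivot_longer(all_of(HK_GENES), names_to = "gene", values_to = "value") %>%
  ggplot(aes(x = value)) +
  geom_histogram(bins = 15) +
  facet_wrap(~ gene, nrow = 5, scales = "free_x")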
I have a data frame my_df with 10,000 different sequences of varying lengths (between 13 and 18), composed of the digits 0-3.
Example of my data (60 lines):
library(stringdist)
library(igraph)
library(reshape2)
structure(list(alfa_ch = c("2000000232003211","2000000331021", "20000003310320011", "20000003323331021",
"20000003331001","20000003331001", "20000003332021", "200000100331021",
"20000013011001","20000013301021", "2000001333331011", "20000023231031",
"200000233302001","20000023331011", "20000023331012", "20000023332021",
"200000233331021","20000030231011", "200000303323331021", "200000313301021",
"20000032031021","2000003220021", "2000003221011", "2000003231031",
"20000032311001","200000330330021", "2000003311211", "2000003331001",
"2000003331001","2000003331012", "20000033321012", "200000333231011",
"20000033323331021","20000033331021", "2000010320011", "20000103323331021",
"200001113011001","20000113011001", "20000120330021", "20000123033011",
"2000012331131","2000013011001", "2000013301021", "200001330231011",
"2000013323001","20000133231311", "20000133301001", "200001333331011",
"200001333331011","200001333331011", "200001333331011", "20000200331021",
"20000200331021","20000200331131", "20000203221011", "2000020333133011",
"20000212221111","20000213301021", "2000021331011", "200002223231011")),
row.names = c(1L,3L, 5L, 6L, 7L, 8L, 9L, 10L, 12L, 13L, 14L, 16L, 17L, 18L, 19L,20L, 21L,
23L, 24L, 27L, 29L, 31L, 32L, 33L, 34L, 35L, 38L, 41L,42L, 43L, 46L, 47L, 48L,
49L, 58L, 59L, 60L, 62L, 63L, 64L, 66L,68L, 71L, 72L, 73L, 74L, 75L, 77L, 78L,
79L, 80L, 81L, 82L, 83L,84L, 85L, 89L, 90L, 91L, 95L), class = "data.frame")
My goal is to cluster them by edit distance < 3.
# Levenshtein (edit) distance matrix between all sequences
dist_mtx <- as.matrix(stringdistmatrix(my_df$alfa_ch, my_df$alfa_ch, method = "lv"))
dist_mtx[dist_mtx > 3] <- NA   # drop pairs above the distance threshold
dist_mtx[dist_mtx == 0] <- NA  # drop self-matches
colnames(dist_mtx) <- my_df$alfa_ch
rownames(dist_mtx) <- my_df$alfa_ch
Then I created an edge list, where the value represents the edit distance between any two sequences:
edge_list <- unique(melt(dist_mtx, na.rm = TRUE, varnames = c('seq1', 'seq2'), as.is = TRUE))
edge_list <- edge_list[!is.na(edge_list$value), ]
Then I created the igraph object:
igraph_obj <- igraph::graph_from_data_frame(edge_list, directed = FALSE, vertices = my_df$alfa_ch)
Then I tried numerous methods to cluster those sequences with the Louvain method, but I am still getting clusters whose members have edit distance > 3; I am aware that this might be because of the connected components.
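For reference, a basic Louvain run on the graph above looks roughly like this (a simplified sketch, not my exact code; cluster_louvain() is igraph's implementation):
# Louvain community detection on the thresholded graph
louv <- igraph::cluster_louvain(igraph_obj)
# Cluster sizes
table(igraph::membership(louv))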
So my questions are:
Is there a way to cluster the sequences so that, within each cluster, all members are within edit distance < 3 of each other?
Is there a way to recognize the cluster centers (hubs), and to assign vertices to those centers while taking the edit distance into account? I tried hubness.score().
This is my first post; I will appreciate any help.
I've found the functions onls::onls() and pracma::odregress() that calculate orthogonal regression models. I would like to plot such models in the same style as geom_smooth(), that is, the regression line surrounded by a 95% confidence interval.
Example:
example <- structure(list(y = c(-28.9143374863044, -28.5783512160246, -29.1751498307569,
-28.5613677412358, -29.2441600709021, -29.1848482932202, -29.469712350617,
-29.1212786695474, -29.3338385227209, -29.0582324840251, -29.1159002526588,
-29.1384485361936, -29.4743426548081, -29.242305699462, -29.5517891592378,
-29.1701701877517, -29.2337122509592, -29.150317639976, -29.139526754614,
-29.05974643127, -29.0540797909476, -29.0859798970361, -29.27517072563,
-29.1907525452561, -30.0965246973573, -28.9734662257987, -29.6953578711591,
-28.2014460687026, -30.0621997994278, -27.9399550295493, -29.8886842413551,
-29.6609659140518, -29.6920474706673, -30.2418230320867, -29.8334571372628,
-29.8626462112615, -29.9051818751105, -29.6518825347484, -29.5380886463871,
-29.7500527026688, -29.6095990506199, -29.6049957701729, -29.5368579894466,
-29.5861340837645, -29.5737037489314, -29.5773848425703, -28.0265409956043,
-28.0899954900073, -28.265152586989, -28.0062832808179, -27.7205565228848,
-27.4041257575861, -28.1113851658386, -26.914663492446, -27.877772497213,
-27.0684956870887, -27.9276723508022, -27.7588907638397, -27.3710663654935,
-27.3623535825255, -27.7783142763593, -28.5132310123219, -28.5193067297636,
-28.5283974320574, -28.6153706663899, -28.6816032262091, -29.1043640141426,
-28.44589108955, -28.6614098552091, -28.7403207700811), x = c(33.1158714294,
18.6527993810972, 17.0276514703819, 22.3627925702962, 18.170924813473,
32.0677953809724, 46.5216445923, 34.9911138888596, 25.0910229505442,
13.9473438263, 17.381641499988, 17.014380035215, 40.9107205320526,
52.2695803285185, 58.9499627404227, 40.5894751586832, 23.496896254444,
33.6412616569372, 14.7548102820616, 46.3057677573658, 14.280050708175,
31.2877073530984, 18.8534870545271, 16.5168182808868, 63.9908365598676,
33.7277991683148, 35.4163778417314, 32.1050571361531, 51.3240160147292,
24.4237814340378, 39.3334452128324, 53.8079129732769, 43.26844558712,
58.3003234863, 43.934151887875, 76.8046441618721, 64.8779439305438,
46.8684772359235, 66.4989547729, 41.9780584414396, 50.2248225396345,
58.8492643072032, 64.5647735596, 48.3225469025232, 60.4074024077677,
57.3789336302925, 11.2785320282, 11.3491302769043, 7.59091310831495,
18.4789668943737, 5.84773873549871, 10.6156844347299, 15.7432512138035,
11.4885938379565, 7.74754936760848, 12.1071624756, 14.9075944237136,
20.9201573163328, 30.2789412366595, 33.8582180028129, 15.4269225956373,
8.53801707561128, 10.1814249853966, 7.33018941782735, 8.42749268077253,
9.74786459733547, 10.5363144200841, 10.7873065304121, 16.7602893825786,
12.7551904319156)), class = "data.frame", row.names = c(1L, 2L,
3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 16L,
17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L,
30L, 31L, 32L, 33L, 34L, 35L, 36L, 39L, 40L, 41L, 42L, 43L, 44L,
45L, 46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L,
58L, 59L, 60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L,
71L, 72L))
library(onls)
mod <- onls(y ~ a*x + b, data = example, start = list(a = 0.03, b = -28))
newData <- data.frame(x = seq(min(example$x), max(example$x), 0.1))
newData$y <- predict(mod, newdata = newData)
plot(y ~ x, data = newData, type = "l", col = "red")
points(y ~ x, data = example)
# for a regular lm() model the subsequent steps would be
conf <- predict(mod, newdata = newData, interval = "confidence", level = 0.95)
lines(newData$x, conf[,2])
lines(newData$x, conf[,3])
However, the last steps won't give any useful result when applied to the onls model. Are there any methods to calculate or estimate those confidence intervals?
Edit: As DanY mentioned, the onls package contains the confint.onls() function. This gives the upper and lower estimates for each regression parameter at a given confidence level:
confint(mod, level = 0.95)
I could do something like
conf_a <- confint(mod, param = "a", level = 0.95)
conf_b <- confint(mod, param = "b", level = 0.95)
and calculate the extrema for each x
x <- seq(min(example$x), max(example$x), 0.1)
test <- cbind(
conf_a[1]*x+conf_b[1],
conf_a[1]*x+conf_b[2],
conf_a[2]*x+conf_b[1],
conf_a[2]*x+conf_b[2]
)
maxima <- vector()
for (i in 1:length(x)) {
  maxima[i] <- max(test[i, ])
}
but this doesn't quite look like what I'd expect, and I'm not really convinced this is the correct approach.
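As an aside, the row-wise maxima in the loop above can also be computed in a single call (equivalent result):
# Maximum of the four candidate lines at each x
maxima <- apply(test, 1, max)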
test <- structure(list(trip_count = 1:10, dropoff_longitude = c(-73.959862,
-73.882202, -73.934113, -73.992203, -74.00563, -73.975189, -73.97448,
-73.974838, -73.981377, -73.955093), dropoff_latitude = c(40.773617,
40.744175, 40.715923, 40.749203, 40.726158, 40.729824, 40.763599,
40.754135, 40.759987, 40.765224)), row.names = c(NA, -10L), class = c("data.table",
"data.frame"), .internal.selfref = <pointer: 0x7fd18800f6e0>)
zip_codes <- structure(list(zipcode = c("10001", "10002", "10003", "10004",
"10005", "10006", "10007", "10009", "10010", "10011", "10012",
"10013", "10014", "10016", "10017", "10018", "10019", "10020",
"10021", "10022", "10023", "10024", "10025", "10026", "10027",
"10028", "10029", "10030", "10031", "10032", "10033", "10034",
"10035", "10036", "10037", "10038", "10039", "10040", "10044",
"10065", "10069", "10075", "10103", "10110", "10111", "10112",
"10115", "10119", "10128", "10152", "10153", "10154", "10162",
"10165", "10167", "10168", "10169", "10170", "10171", "10172",
"10173", "10174", "10177", "10199", "10271", "10278", "10279",
"10280", "10282"), bounds_north = c(40.759731, 40.724136, 40.739673,
40.709044, 40.709294, 40.71369, 40.71719, 40.734975, 40.745421,
40.756703, 40.731706, 40.727557, 40.742873, 40.752197, 40.757912,
40.762526, 40.773446, 40.761094, 40.775045, 40.764898, 40.783192,
40.818099, 40.811264, 40.807546, 40.822108, 40.782213, 40.800665,
40.824032, 40.834372, 40.850517, 40.861552, 40.87765, 40.809582,
40.765558, 40.819569, 40.714451, 40.846615, 40.866336, 40.772955,
40.770517, 40.781007, 40.777677, 40.761771, 40.755516, 40.759689,
40.759899, 40.811331, 40.751522, 40.787914, 40.759059, 40.764279,
40.758432, 40.770085, 40.752801, 40.755303, 40.752119, 40.754974,
40.753811, 40.756556, 40.755928, 40.754783, 40.752116, 40.7556,
40.752723, 40.708797, 40.71628, 40.713256, 40.714767, 40.719611
), bounds_south = c(40.743451, 40.708802, 40.722933, 40.683919,
40.702879, 40.705871, 40.709806, 40.718612, 40.73231, 40.731043,
40.719867, 40.713446, 40.72428, 40.73801, 40.747251, 40.749102,
40.758645, 40.757284, 40.758133, 40.751445, 40.768436, 40.778805,
40.788476, 40.79691, 40.803047, 40.770062, 40.782531, 40.812791,
40.817221, 40.829083, 40.842958, 40.849745, 40.781075, 40.752197,
40.806636, 40.701689, 40.817912, 40.851863, 40.749415, 40.759284,
40.771612, 40.769441, 40.759787, 40.753481, 40.758538, 40.758436,
40.810373, 40.749101, 40.773108, 40.757749, 40.762964, 40.757125,
40.768355, 40.75146, 40.753994, 40.750775, 40.753811, 40.751441,
40.755243, 40.754619, 40.753481, 40.750766, 40.754678, 40.750241,
40.707694, 40.714082, 40.711995, 40.700273, 40.713378), bounds_east = c(-73.984076,
-73.973635, -73.979864, -73.995657, -74.004569, -74.009988, -74.000455,
-73.971282, -73.971566, -73.990798, -73.991794, -73.994035, -73.999555,
-73.968192, -73.964271, -73.981822, -73.973015, -73.977201, -73.947973,
-73.958599, -73.974067, -73.960687, -73.954966, -73.944667, -73.940404,
-73.944337, -73.930891, -73.936232, -73.938588, -73.934671, -73.92216,
-73.910587, -73.914228, -73.978116, -73.933219, -73.991772, -73.929107,
-73.924385, -73.940026, -73.952085, -73.986609, -73.947039, -73.975831,
-73.980395, -73.976744, -73.97845, -73.963058, -73.99111, -73.937328,
-73.970993, -73.971411, -73.971451, -73.94827, -73.977677, -73.973735,
-73.976048, -73.975209, -73.974648, -73.97282, -73.973276, -73.978332,
-73.973959, -73.975352, -73.993948, -74.009829, -74.002115, -74.007666,
-74.013754, -74.012441), bounds_west = c(-74.008621, -73.997532,
-73.999604, -74.047285, -74.012508, -74.015905, -74.013754, -73.988643,
-73.994028, -74.012359, -74.004575, -74.016381, -74.01599, -73.987746,
-73.981822, -74.007989, -74.003477, -73.98373, -73.968441, -73.977655,
-73.990149, -73.98814, -73.977092, -73.962475, -73.9659, -73.96323,
-73.955778, -73.948677, -73.960007, -73.950403, -73.944672, -73.947051,
-73.946462, -74.001702, -73.943398, -74.010542, -73.943506, -73.938947,
-73.961583, -73.972553, -73.996142, -73.965148, -73.979513, -73.984118,
-73.97845, -73.980886, -73.964424, -73.994844, -73.959921, -73.973068,
-73.973465, -73.973524, -73.951858, -73.979768, -73.975807, -73.978159,
-73.976974, -73.977107, -73.974897, -73.975352, -73.980395, -73.976048,
-73.976516, -74.00143, -74.011248, -74.00542, -74.009668, -74.019603,
-74.01831), zip = c(10001, 10002, 10003, 10004, 10005, 10006,
10007, 10009, 10010, 10011, 10012, 10013, 10014, 10016, 10017,
10018, 10019, 10020, 10021, 10022, 10023, 10024, 10025, 10026,
10027, 10028, 10029, 10030, 10031, 10032, 10033, 10034, 10035,
10036, 10037, 10038, 10039, 10040, 10044, 10065, 10069, 10075,
10103, 10110, 10111, 10112, 10115, 10119, 10128, 10152, 10153,
10154, 10162, 10165, 10167, 10168, 10169, 10170, 10171, 10172,
10173, 10174, 10177, 10199, 10271, 10278, 10279, 10280, 10282
)), row.names = c(1L, 2L, 3L, 4L, 5L, 6L, 7L, 9L, 10L, 11L, 12L,
13L, 14L, 16L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L, 26L,
27L, 28L, 29L, 30L, 31L, 32L, 33L, 34L, 35L, 36L, 37L, 38L, 39L,
40L, 43L, 50L, 51L, 53L, 67L, 74L, 75L, 76L, 79L, 83L, 91L, 101L,
102L, 103L, 111L, 114L, 116L, 117L, 118L, 119L, 120L, 121L, 122L,
123L, 126L, 133L, 151L, 158L, 159L, 160L, 162L), class = "data.frame")
Hey guys, so I am trying to fuzzy-join lat & lon information to get the zip code of a specific location. I tried:
library(fuzzyjoin)
library(dplyr)
test <- test %>%
  fuzzy_left_join(zip_codes,
                  by = c("dropoff_longitude" = "bounds_east",
                         "dropoff_longitude" = "bounds_west",
                         "dropoff_latitude" = "bounds_north",
                         "dropoff_latitude" = "bounds_south"),
                  match_fun = list('<=', '>=', '<=', '>='))
But unfortunately, this returns the error message: Error: All columns in a tibble must be vectors. x Column "col" is NULL.
I don't know how to solve this; there is no column "col" in either of the data frames. The result should give me the corresponding zip code if the dropoff_longitude is between bounds_east and bounds_west and the dropoff_latitude is between bounds_north and bounds_south.
Thanks a lot in advance!
We could use the non-equi join from data.table, since one of the datasets is already a data.table:
library(data.table)
# Non-equi join: match each dropoff point against the zip code bounding boxes
setDT(test)[zip_codes, on = .(dropoff_longitude <= bounds_east,
                              dropoff_longitude >= bounds_west,
                              dropoff_latitude <= bounds_north,
                              dropoff_latitude >= bounds_south)]
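For comparison, the original fuzzyjoin call may also work once match_fun is given actual comparison functions rather than character strings (an assumption about what triggers the error; an untested sketch):
library(fuzzyjoin)
library(dplyr)
test %>%
  fuzzy_left_join(zip_codes,
                  by = c("dropoff_longitude" = "bounds_east",
                         "dropoff_longitude" = "bounds_west",
                         "dropoff_latitude" = "bounds_north",
                         "dropoff_latitude" = "bounds_south"),
                  # Comparison functions passed as functions, not strings
                  match_fun = list(`<=`, `>=`, `<=`, `>=`))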
I am trying to mimic some figures from journal papers. Here is an example from Schlenker and Roberts (2009).
I'd like to add a similar histogram to my own plot. Please see below. Is it possible to achieve this task with ggplot? Thanks.
See a dput of the data below; rh is the x-axis variable and yhat1 is the y-axis variable.
df.m <- structure(list(rh = c(11L, 13L, 15L, 16L, 17L, 18L, 19L, 20L,
21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L, 31L, 32L, 33L,
34L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L, 46L,
47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L, 59L,
60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L, 71L, 72L,
73L, 74L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L,
86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 97L, 98L,
99L, 100L), yhat1 = c(0.0097784, 0.111762325, 0.0887123966666667,
0.24714677, 0.079887235, 0.162714825, 0.24789043, 0.107558165,
0.182885584545455, 0.136690964444444, 0.159203683333333, 0.5156053805,
0.587034213636364, 0.233377613, 0.31531245, 0.4778449572, 0.212574774137931,
0.2274105676, 0.253733041707317, 0.560999839354839, 0.224892959444444,
0.392268151304348, 0.351498776603774, 0.366547010727273, 0.35013903469697,
0.382026272372881, 0.510611202461538, 0.391176294871795, 0.423356474328358,
0.380316089137931, 0.459821489651163, 0.388949226593407, 0.506833284166667,
0.459263999259259, 0.558535709906542, 0.745323656071429, 0.60167464606383,
0.72210854266129, 0.695203745656566, 0.638265557105263, 0.52373110503876,
0.611695133046875, 0.963833986386555, 0.803060819275362, 0.837984669112426,
0.7931166204, 0.870764136976744, 1.21005393820225, 0.862845527777778,
1.028402381125, 1.2077895633526, 1.01176334204082, 1.08139833964706,
0.90346288, 1.05871937863014, 1.27788244930233, 1.16250975336634,
1.1450916525, 1.4412301412, 1.21264826238281, 1.35417930411504,
1.18588206727273, 1.40277204710084, 1.33194569259259, 1.18413544210084,
1.22718163528571, 1.33992107226667, 1.44770425268156, 1.43974964777778,
1.26656031551351, 1.58998655363636, 1.29994566024272, 1.46398530493902,
1.26061274530055, 1.30718501225275, 1.20523443567901, 1.23789593428571,
1.34433582230769, 1.36438752851852, 1.5915544857037, 1.10979387898438,
1.31898147708661, 1.426120105, 1.52075980155738, 1.40629729460177,
0.9048366681, 1.2973945580531, 1.37696154192982)), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -88L))
Hopefully this can get you started:
library(ggplot2)
breaks <- 20
# Tallest histogram bar (plus 1); used to shift the smoothed curve above the histogram
maxcount <- max(table(cut(df.m$rh, breaks = breaks))) + 1
ggplot(data = df.m, aes(x = rh)) +
  # Loess-smoothed yhat1, rescaled and shifted so it sits above the histogram
  stat_smooth(formula = y ~ x, aes(y = yhat1 * 10 + maxcount), method = "loess") +
  # Primary axis shows the histogram counts; the secondary axis undoes the rescaling
  scale_y_continuous(breaks = c(0, 5), "Exposure (Days)",
                     sec.axis = sec_axis(~ (. - maxcount) / 10,
                                         "Log of Daily Confirmed Case Counts")) +
  geom_histogram(bins = breaks, color = "black", fill = "green") +
  # Dashed vertical reference line at rh = 85, starting at the top of the histogram
  geom_segment(aes(x = 85, xend = 85, y = 0 + maxcount, yend = Inf),
               col = "red", linetype = "dashed") +
  labs(x = "Relative Humidity Percentage") + theme_classic() +
  theme(axis.line.y.left = element_line(color = "green"),
        axis.title.y.left = element_text(hjust = 0.05, color = "green"))