Extract and organise a text file into a data frame - R

I have a huge text file with the following structure:
AA <- tibble::tribble(
~`-------------------------------------------------`,
"ABCD 2002201234 09-06-2015 10:34",
"-------------------------------------------------",
"Lorem ipsum",
"Lorem ipsum",
"Lorem ipsum Lorem ipsum",
"Lorem ipsum: Lorem ipsum",
"123456",
"AB",
"AB",
"Lorem ipsum",
"-------------------------------------------------",
"ABCDEF 1001101234 05-03-2011 09:15",
"-------------------------------------------------",
"TEST",
"TEST"
)
I want to organise the above into a data frame with the variables ID, DATE and TEXT. ID should be the 10-digit number (2002201234 and 1001101234 in the example), DATE is self-explanatory, and TEXT should be all the text between the bottom separator line ("-------------") of one header and the top separator line of the next post.
What is the easiest way to do this?
Regards, H

in base R:
x <- paste(AA[[1]], collapse = '\n')
# capture (1) the 10-digit ID, (2) the rest of the header line, (3) the text up to the next run of dashes
y <- regmatches(x, gregexec("(\\d{10}) *(.*?)\n-+([^-]+)", x, perl = TRUE))[[1]]
setNames(data.frame(t(y[2:4, ])), c('ID', 'Date', 'Text'))
ID Date Text
<chr> <chr> <chr>
1 2002201234 09-06-2015 10:34 "\nLorem ipsum\nLorem ipsum\nLorem ipsum Lo…
2 1001101234 05-03-2011 09:15 "\nTEST\nTEST"
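Note that gregexec() requires R >= 4.1.0. If Date should be an actual date-time rather than the raw header string, the column can be parsed afterwards, for example (a small follow-up sketch):
res <- setNames(data.frame(t(y[2:4, ])), c('ID', 'Date', 'Text'))
res$Date <- as.POSIXct(res$Date, format = "%d-%m-%Y %H:%M")  # "09-06-2015 10:34" -> date-time
res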

Here is a solution using pmap, which might be a bit overkill or slow depending on how big your file is.
You need to adjust:
the date format (as written it is ambiguous: 09-06-2015 could be day-month-year or month-day-year)
how the text should be collapsed; right now it is joined with a line break
Both adjustments are sketched briefly after the output below.
library(stringr)
library(purrr)
library(dplyr)
AA <- tibble::tribble(
~X1,
"-------------------------------------------------",
"ABCD 2002201234 09-06-2015 10:34",
"-------------------------------------------------",
"Lorem ipsum",
"Lorem ipsum",
"Lorem ipsum Lorem ipsum",
"Lorem ipsum: Lorem ipsum",
"123456",
"AB",
"AB",
"Lorem ipsum",
"-------------------------------------------------",
"ABCDEF 1001101234 05-03-2011 09:15",
"-------------------------------------------------",
"TEST",
"TEST"
)
line_positions <- which(str_detect(AA$X1, "-------------------------------------------------"))
id_positions <- line_positions[seq(from = 1, to = length(line_positions), by = 2)] + 1
text_start_positions <- line_positions[seq(from = 2, to = length(line_positions), by = 2)] + 1
text_stop_positions <- c(line_positions[seq(from = 3, to = length(line_positions), by = 2)] - 1, nrow(AA))
clean_AA <- pmap_dfr(list(id_positions, text_start_positions, text_stop_positions),
                     function(id, start, stop) {
                       entry_info <- AA %>%
                         slice(id) %>%
                         pull(X1) %>%
                         str_split(., pattern = " ")
                       text_info <- AA %>%
                         slice(seq(from = start, to = stop)) %>%
                         pull(X1)
                       data.frame(
                         ID = entry_info[[1]][2],
                         DATE = as.Date(entry_info[[1]][3], format = "%d-%m-%Y"),
                         TEXT = paste0(text_info, collapse = "\n")
                       )
                     })
clean_AA
#> ID DATE
#> 1 2002201234 2015-06-09
#> 2 1001101234 2011-03-05
#> TEXT
#> 1 Lorem ipsum\nLorem ipsum\nLorem ipsum Lorem ipsum\nLorem ipsum: Lorem ipsum\n123456\nAB\nAB\nLorem ipsum
#> 2 TEST\nTEST
Created on 2023-02-06 by the reprex package (v1.0.0)
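To make those two adjustments concrete, here is a tiny standalone illustration (a sketch; which date order and which separator are right depends on your data):
library(lubridate)
# the header date is ambiguous: the same string parses to two different dates
dmy("09-06-2015")  # 2015-06-09 (day-month-year)
mdy("09-06-2015")  # 2015-09-06 (month-day-year)
# collapsing the text with a blank instead of a line break
paste0(c("Lorem ipsum", "Lorem ipsum"), collapse = " ")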

A solution using basic tidyverse packages. Look to the comments in the code for detailed explanations of the steps.
library(tidyverse)
library(lubridate)
separator <- "-------------------------------------------------"
tibble(
  tx = c(names(AA), AA[[1]]) # pull the first line out of the column name back into the data; this should be handled during import
) |>
  mutate(
    grp = (tx == separator) %>%   # detect separator lines
      {. & lead(., 2)} |>         # a group begins with a separator line followed by another one two lines later
      cumsum()
  ) |>
  filter(tx != separator) |>      # remove separator lines
  nest(text = tx) |>              # nest to make the document the unit of observation
  mutate(
    fst = map_chr(text, \(x) x |> # extract the first line containing the meta info
      pull(1) |>
      first()),
    id = str_extract(fst, "\\d{10}"),                        # regex for the 10-digit id string
    date = str_extract(fst, "\\d{2}-\\d{2}-\\d{4}") |>       # regex for the date
      lubridate::dmy(),
    text = map_chr(text, \(x) x |>                           # collapse the text body to a single string
      slice(-1) |>
      pull(1) |>
      str_c(collapse = "\n")),
    .before = text
  ) |>
  select(-fst)
#> # A tibble: 2 × 4
#> grp id date text
#> <int> <chr> <date> <chr>
#> 1 1 2002201234 2015-06-09 "Lorem ipsum\nLorem ipsum\nLorem ipsum Lorem ipsu…
#> 2 2 1001101234 2011-03-05 "TEST\nTEST"
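The grouping step is the only non-obvious part: a new group starts wherever a separator line is followed by another separator two lines further down, i.e. at the top edge of each header block. A small standalone illustration of just that logic (toy vector, not the original data):
library(dplyr)
sep <- "---"
tx  <- c(sep, "header 1", sep, "body A", "body A",
         sep, "header 2", sep, "body B", "body B")
is_sep <- tx == sep
cumsum(is_sep & lead(is_sep, 2))
#> [1] 1 1 1 1 1 2 2 2 2 2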

I would use some simple sequential steps within the tidyverse. I would mainly use dplyr, tidyr and stringr.
library(dplyr)
library(tidyr)
library(stringr)
AA %>%
  rename_with(~ "text") %>%
  filter(!str_detect(text, "-{3,}")) %>%                      # remove "-----" lines
  mutate(index = cumsum(str_detect(text, ".*\\d{10}.*"))) %>% # create id index column
  group_by(index) %>%
  mutate(temp = first(text)) %>%                              # copy id+date info into a temporary column
  extract(col = temp,
          into = c("ID", "date"),
          regex = ".*(\\d{10}).*(\\d{2}-\\d{2}-\\d{4}).*",
          remove = TRUE) %>%                                  # create "ID" and "date" columns from the temp column
  mutate(date = lubridate::dmy(date)) %>%                     # convert dates into proper Date class
  slice(-1) %>%                                               # remove case headers/id rows
  nest(text = text) %>%                                       # one case per row, with a nested text variable
  ungroup()
# A tibble: 2 × 4
index ID date text
<int> <chr> <chr> <list>
1 1 2002201234 09-06-2015 <tibble [8 × 1]>
2 2 1001101234 05-03-2011 <tibble [2 × 1]>
This gives us the desired output, with the text column as a list of tibbles holding all the text data. Assuming the result above is assigned back to AA, it is fairly easy to handle these tibbles afterwards:
pull(AA, text)
[[1]]
# A tibble: 8 × 1
text
<chr>
1 Lorem ipsum
2 Lorem ipsum
3 Lorem ipsum Lorem ipsum
4 Lorem ipsum: Lorem ipsum
5 123456
6 AB
7 AB
8 Lorem ipsum
[[2]]
# A tibble: 2 × 1
text
<chr>
1 TEST
2 TEST
OR
mutate(AA, text = map(text, pull))
# A tibble: 2 × 4
index ID date text
<int> <chr> <chr> <list>
1 1 2002201234 09-06-2015 <chr [8]>
2 2 1001101234 05-03-2011 <chr [2]>
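If you prefer TEXT as a single string per post (as described in the question) rather than a nested tibble, the nested column can be collapsed afterwards. A sketch, assuming the pipeline above was assigned to a hypothetical result object:
library(dplyr)
library(purrr)
library(stringr)
result %>%  # `result` stands for the tibble produced by the pipeline above
  mutate(TEXT = map_chr(text, ~ str_c(pull(.x, text), collapse = "\n"))) %>%
  select(-text)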

Related

Extract digits from strings in R

I have a dataframe which contains a text string, like below, that shows the ingredients and the proportion of each ingredient. What I would like to achieve is to extract the proportion of each ingredient as a separate variable:
What I have:
given <- tibble(
ingredients =c("1.5BZ+1FZ+2HT","2FZ","0.5HT+2BZ")
)
What I want to achieve:
to_achieve <- tibble(
ingredients =c("1.5BZ+1FZ+2HT","2FZ","0.5HT+2BZ"),
proportion_bz = c(1.5,0,2),
proportion_fz = c(1,2,0),
proportion_ht=c(2,2,0.5)
)
Please note there might be more than a dozen different ingredients and tidyverse methods are preferred.
Thanks in advance,
Felix
Making heavy use of tidyr, you could first split your strings into one row per ingredient using separate_rows, then extract the numeric proportion and the ingredient type, and finally use pivot_wider to reshape into your desired format:
library(dplyr)
library(tidyr)
given %>%
  mutate(ingredients_split = ingredients) |>
  tidyr::separate_rows(ingredients_split, sep = "\\+") |>
  tidyr::extract(
    ingredients_split,
    into = c("proportion", "ingredient"),
    regex = "^([\\d+\\.]+)(.*)$"
  ) |>
  mutate(
    proportion = as.numeric(proportion),
    ingredient = tolower(ingredient)
  ) |>
  pivot_wider(
    names_from = ingredient,
    names_prefix = "proportion_",
    values_from = proportion,
    values_fill = 0
  )
#> # A tibble: 3 × 4
#> ingredients proportion_bz proportion_fz proportion_ht
#> <chr> <dbl> <dbl> <dbl>
#> 1 1.5BZ+1FZ+2HT 1.5 1 2
#> 2 2FZ 0 2 0
#> 3 0.5HT+2BZ 2 0 0.5
library(dplyr)
library(tidyr)
library(readr)
library(stringr)
library(janitor)
# SOLUTION -----
given %>%
separate(ingredients, into = c("a", "b", "c"), sep = "\\+", remove = F) %>%
pivot_longer(a:c) %>%
select(-name) %>%
mutate(name = str_remove_all(value, "[0-9]|\\."),
value = parse_number(value)) %>%
na.omit() %>%
pivot_wider(names_prefix = "proportion_", values_fill = 0) %>%
clean_names()
# OUTPUT ----
#># A tibble: 3 × 4
#> ingredients proportion_bz proportion_fz proportion_ht
#> <chr> <dbl> <dbl> <dbl>
#>1 1.5BZ+1FZ+2HT 1.5 1 2
#>2 2FZ 0 2 0
#>3 0.5HT+2BZ 2 0 0.5
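Since the question mentions more than a dozen ingredients, the hard-coded into = c("a", "b", "c") would silently drop pieces beyond the third. A hedged sketch of the same idea that first counts how many pieces the widest row has:
library(dplyr)
library(tidyr)
library(readr)
library(stringr)
library(janitor)
# the widest row determines how many pieces separate() needs
max_parts <- max(str_count(given$ingredients, "\\+")) + 1
given %>%
  separate(ingredients, into = paste0("part", seq_len(max_parts)),
           sep = "\\+", fill = "right", remove = FALSE) %>%
  pivot_longer(starts_with("part")) %>%
  select(-name) %>%
  mutate(name = str_remove_all(value, "[0-9]|\\."),
         value = parse_number(value)) %>%
  na.omit() %>%
  pivot_wider(names_prefix = "proportion_", values_fill = 0) %>%
  clean_names()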

Split a list of elements into multiple columns with Key - Value

I am working with a large dataframe containing a string column that is a list of elements (as chr). I want to separate the string so that each element gets its own column as key - value. I tried tidyr::separate() and tidyr::unnest_wider(), but neither returned my desired output.
Here is a dummy data :
df1 <- tibble(
id = c('000914', '000916'),
code = c('NN', 'SS'),
values2 = c("{DS=15}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}" , "{DS=0}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}"
) )
# A tibble: 2 x 3
id code values2
<chr> <chr> <chr>
1 000914 NN {DS=15}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}
2 000916 SS {DS=0}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}
I tried separate; this is not wrong, but it is not exactly what I am looking for, and it would need several pivot_longer and pivot_wider calls for reshaping. Is there a better and faster alternative?
df1 %>%
separate(values2, into = paste("Col", 1:14))
# A tibble: 2 x 16
id code `Col 1` `Col 2` `Col 3` `Col 4` `Col 5` `Col 6` `Col 7` `Col 8` `Col 9`
<chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr>
1 000914 NN "" DS 15 FPLUC 0 N CELL R NINT1
2 000916 SS "" DS 0 FPLUC 0 N CELL R NINT1
# ... with 5 more variables: Col 10 <chr>, Col 11 <chr>, Col 12 <chr>, Col 13 <chr>,
# Col 14 <chr>
Here is my desired output:
id code DS FPLUC N R S SPLUC
1 000914 NN 15 0 CELL NINT1 true 1
2 000916 SS 0 0 CELL NINT1 true 1
Alternative solution:
library(tidyverse)
df1 %>%
  tidyr::extract(values2,
                 into = c("DS", "FPLUC", "N", "R", "S", "SPLUC"),
                 regex = "\\{DS=(.+?)\\}\\{FPLUC=(.+?)\\}\\{N=(.+?)\\}\\{R=(.+?)\\}\\{S=(.+?)\\}\\{SPLUC=(.+?)\\}")
One capture group (.+?) per key grabs the value between the = sign and the closing }. The c("DS", "FPLUC", "N", "R", "S", "SPLUC") argument names the new columns created from those captured values; everything outside the groups (the braces and the key names) is dropped.
A pure tidyr solution:
library(tidyr)
df1 %>%
separate_rows(values2, sep = '(?<=\\})(?=\\{)') %>%
extract(values2, c('name', 'value'), '\\{(.+?)=(.+?)\\}') %>%
pivot_wider()
# # A tibble: 2 × 8
# id code DS FPLUC N R S SPLUC
# <chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr>
# 1 000914 NN 15 0 CELL NINT1 true 1
# 2 000916 SS 0 0 CELL NINT1 true 1
separate_rows() separates a collapsed column (values2) into multiple rows. The separator (?<=\\})(?=\\{) locates the position between } and {.
extract() separates a character column into multiple columns using regular expression groups. The regex \\{(.+?)=(.+?)\\} searches for the pattern {Col=Value} and extracts Col and Value respectively as new columns.
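To see what that separator regex does on its own, here is a quick illustration of the split on a shortened value:
library(stringr)
str_split("{DS=15}{FPLUC=0}{N=CELL}", "(?<=\\})(?=\\{)")[[1]]
#> [1] "{DS=15}"   "{FPLUC=0}" "{N=CELL}"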
It's messy but you may try:
library(tidyverse)
nms <- str_extract_all(df1$values2[1], "(?<=\\{).+?(?=\\=)", simplify = T)
nms <- c(names(df1)[-3],nms)
df1 %>%
mutate(values2 = str_extract_all(values2, "(?<=\\=).+?(?=\\})")) %>%
unnest_wider(values2, names_repair = ~nms)
id code DS FPLUC N R S SPLUC
<chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr>
1 000914 NN 15 0 CELL NINT1 true 1
2 000916 SS 0 0 CELL NINT1 true 1
If you are not so keen on the REGEX, try the following
library(dplyr, quietly=TRUE, warn.conflicts=FALSE)
#> Warning: package 'dplyr' was built under R version 4.1.3
library(tidyr)
df1 <- tibble(
id = c('000914', '000916'),
code = c('NN', 'SS'),
values2 = c("{DS=15}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}" , "{DS=0}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}"
) )
df1
#> # A tibble: 2 x 3
#> id code values2
#> <chr> <chr> <chr>
#> 1 000914 NN {DS=15}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}
#> 2 000916 SS {DS=0}{FPLUC=0}{N=CELL}{R=NINT1}{S=true}{SPLUC=1}
df1 %>%
mutate(values2 = stringr::str_remove_all(values2, "\\}")) %>% # remove the } from values 2
separate(values2, into = c("X","DS","FPLUC","N","R","S","SPLUC"), sep = "{") %>% # split values 2 into required columns
mutate(across(.cols = c(DS, FPLUC, N, R, S, SPLUC),
.fns = ~stringr::str_remove(.x, "^.+="))) %>% #remove "xxx=" from each of the columns
select(!X) # keep all columns except X as it is empty
#> # A tibble: 2 x 8
#> id code DS FPLUC N R S SPLUC
#> <chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr>
#> 1 000914 NN 15 0 CELL NINT1 true 1
#> 2 000916 SS 0 0 CELL NINT1 true 1

mutate(foo = bar + bla) but sometimes bar or bla don't exist which results in error message

I have a data frame that usually looks like this:
structure(list(date = structure(18780, class = "Date"), bar = 1L,
Sessions = 2990L, `bla` = 20L), row.names = c(NA,
-1L), class = c("tbl_df", "tbl", "data.frame"))
Looks like:
# A tibble: 1 x 4
date bar Sessions bla
<date> <int> <int> <int>
1 2021-06-02 1 2990 20
With this I mutate:
mydf %>% mutate(foo = bar + bla)
# A tibble: 1 x 5
date bar Sessions bla foo
<date> <int> <int> <int> <int>
1 2021-06-02 1 2990 20 21
But this is within the context of a shiny app with user filters. Sometimes, after the user's inputs, the resulting data frame has no bar or bla column. So when I add them during mutate I get:
Error: Problem with `mutate()` input `foo`.
x object 'bar' not found
ℹ Input `foo` is ``bar` + `bla``
In the case where either bar or bla doesn't exist, I'd still like my mutate to use whichever one remains; if both are missing, just create the new feature foo with a value of 0. In pseudo-R, something like:
mydf %>% mutate(foo = if(bar exists then bar else 0) + if(bla exists then bla else 0))
Is there a 'nice' or elegant way of doing this? Perhaps tidyverse approach?
Perhaps this might help, using any_of():
library(dplyr)
cols <- c('bar', 'bla')
df %>% mutate(foo = rowSums(select(., any_of(cols))))
# A tibble: 1 x 5
# date bar Sessions bla foo
# <date> <int> <int> <int> <dbl>
#1 2021-06-02 1 2990 20 21
If bar is absent this will still work -
df %>%
select(-bar) %>%
mutate(foo = rowSums(select(., any_of(cols))))
# date Sessions bla foo
# <date> <int> <int> <dbl>
#1 2021-06-02 2990 20 20
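If both columns are missing, the empty selection should still sum row-wise to 0, so foo defaults to 0 as requested (a quick sketch, same cols as above):
df %>%
  select(-bar, -bla) %>%
  mutate(foo = rowSums(select(., any_of(cols))))
# foo should come out as 0 for every row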
An alternative way using apply function
library(dplyr)
mydf %>%
mutate(foo =apply(select(.,"bar","bla"), 1, sum))
# A tibble: 1 x 5
date bar Sessions bla foo
<date> <int> <int> <int> <int>
1 2021-06-02 1 2990 20 21
We could use tidyverse methods
library(dplyr)
library(purrr)
df1 %>%
select(-bar) %>%
mutate(foo = select(cur_data(), any_of(cols)) %>%
coalesce(., tibble(!! cols[1] := 0)) %>%
reduce(`+`))
# A tibble: 1 x 4
# date Sessions bla foo
# <date> <int> <int> <int>
#1 2021-06-02 2990 20 20
If both columns are not present
df1 %>%
select(-all_of(cols)) %>%
mutate(foo = select(cur_data(), any_of(cols)) %>%
coalesce(., tibble(!! cols[1] := 0)) %>%
reduce(`+`))
# A tibble: 1 x 3
date Sessions foo
<date> <int> <dbl>
1 2021-06-02 2990 0
I created this function in a package of mine to do just what you described at the end. It adds in a column to the data set with a certain value - but only if it is missing.
add_missing_column <- function(.data, ..., .before = NULL, .after = NULL,
                               .name_repair = c("check_unique", "unique", "universal", "minimal")) {
  .dots <- rlang::enquos(...)
  cols_to_add <- .dots[!names(.dots) %in% names(.data)]
  tibble::add_column(.data, !!!cols_to_add, .before = .before, .after = .after, .name_repair = .name_repair)
}
So in this case I would do something like this.
library(dplyr)
df %>%
add_missing_column(bar = 0L,
bla = 0L) %>%
mutate(foo = bar + bla)
Or more often, I set the columns in a list.
missing_cols <- list(bar = 0L, bla = 0L)
df %>%
add_missing_column(!!!missing_cols) %>%
mutate(foo = bar + bla)
You can set the default to anything you want, since it just forwards to add_column().
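For instance, with NA instead of 0 as the default (a sketch; coalesce() then supplies the 0 when summing):
library(dplyr)
df %>%
  add_missing_column(bar = NA_integer_, bla = NA_integer_) %>%
  mutate(foo = coalesce(bar, 0L) + coalesce(bla, 0L))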

Anonymizing selected columns of a data frame in a tidy way

I'm looking for a tidy way to anonymize selected columns of a data frame.
The best I could come up with is to define a mapping table and then use plyr::mapvalues(), but I can't wrap my head around generalizing this to make it work in conjunction with dplyr::mutate_at() (see pseudo code below).
Or would this best be done via purrr::map2()?
library(magrittr)
df <- tibble::tribble(
~name, ~surname, ~value,
"John", "Doe", 10,
"Jane", "Doe", 20
)
seed <- 2093
cols_to_anon <- c("name", "surname")
recode_table <- cols_to_anon %>%
  dplyr::syms() %>%
  purrr::map(function(.x) {
    uniques <- df %>%
      dplyr::distinct(!!.x) %>%
      dplyr::pull()
    n <- length(uniques)
    set.seed(seed)
    original <- uniques[sample(1:n)]
    set.seed(seed)
    anon_1 <- sample(LETTERS, n, replace = TRUE)
    set.seed(seed)
    anon_2 <- sample(1:1000, n, replace = TRUE)
    anon <- stringr::str_glue("{anon_1}{anon_2}")
    tibble::tibble(original, anon)
  }) %>%
  purrr::set_names(cols_to_anon)
recode_table
#> $name
#> # A tibble: 2 x 2
#> original anon
#> <chr> <S3: glue>
#> 1 Jane W875
#> 2 John D149
#>
#> $surname
#> # A tibble: 1 x 2
#> original anon
#> <chr> <S3: glue>
#> 1 Doe W875
df_anon <- df %>%
dplyr::mutate(
name = plyr::mapvalues(name,
recode_table$name$original,
recode_table$name$anon
),
surname = plyr::mapvalues(surname,
recode_table$surname$original,
recode_table$surname$anon
)
)
df_anon
#> # A tibble: 2 x 3
#> name surname value
#> <chr> <chr> <dbl>
#> 1 D149 W875 10
#> 2 W875 W875 20
Created on 2019-05-16 by the reprex package (v0.2.1.9000)
PSEUDO CODE OF "DESIRED" SOLUTION
df_anon <- df %>%
dplyr::mutate_at(
dplyr::vars(one_of(cols_to_anon)),
~plyr::mapvalues(<col_name_i>,
mtable_list[[<col_name_i>]]$original,
mtable_list[[<col_name_i>]]$anon
)
)
with `<col_name_i>` being the name of the respective column that is to be anonymized
One approach would be:
library(rlang)
library(stringr)
library(tidyverse)
df <- tibble::tribble(
~name, ~surname, ~value,
"John", "Doe", 10,
"Jane", "Doe", 20
)
df
my_selection <- exprs(name, surname)
map(df %>%
      select(!!!my_selection),
    ~ enframe(unique(.), name = NULL, value = "original") %>%
      mutate(anon = str_c(sample(LETTERS, n(), replace = TRUE),
                          sample(1:1000, n(), replace = TRUE),
                          sep = ""))) -> recode_table
recode_table
# $name
# # A tibble: 2 x 2
# original anon
# <chr> <chr>
# 1 John F330
# 2 Jane O445
#
# $surname
# # A tibble: 1 x 2
# original anon
# <chr> <chr>
# 1 Doe N710
imap_dfc(recode_table,
         ~ df %>%
           select(..2) %>%
           `colnames<-`("original") %>%
           left_join(recode_table[[..2]], by = "original") %>%
           select(-original) %>%
           `colnames<-`(..2)) %>%
  cbind(
    df %>%
      select(-c(!!!my_selection))) -> df_anon
df_anon
# name surname value
# 1 F330 N710 10
# 2 O445 N710 20
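For completeness, the mutate_at() pseudocode from the question can also be filled in directly with newer dplyr (>= 1.0) using across() and cur_column(), reusing the recode_table built in the question. A hedged sketch:
library(dplyr)
df_anon <- df %>%
  mutate(across(
    all_of(cols_to_anon),
    ~ plyr::mapvalues(.x,
                      recode_table[[cur_column()]]$original,
                      recode_table[[cur_column()]]$anon)
  ))
df_anon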

tidyverse - preferred way to turn a named vector into a data.frame/tibble

Using the tidyverse a lot, I often face the challenge of turning named vectors into a data.frame/tibble with the columns being the names of the vector.
What is the preferred/tidyverse way of doing this?
EDIT: This is related to: this and this github-issue
So I want:
require(tidyverse)
vec <- c("a" = 1, "b" = 2)
to become this:
# A tibble: 1 × 2
a b
<dbl> <dbl>
1 1 2
I can do this via e.g.:
vec %>% enframe %>% spread(name, value)
vec %>% t %>% as_tibble
Usecase example:
require(tidyverse)
require(rvest)
txt <- c('<node a="1" b="2"></node>',
'<node a="1" c="3"></node>')
txt %>% map(read_xml) %>% map(xml_attrs) %>% map_df(~t(.) %>% as_tibble)
Which gives
# A tibble: 2 × 3
a b c
<chr> <chr> <chr>
1 1 2 <NA>
2 1 <NA> 3
This is now directly supported using bind_rows (introduced in dplyr 0.7.0):
library(tidyverse)
vec <- c("a" = 1, "b" = 2)
bind_rows(vec)
#> # A tibble: 1 x 2
#> a b
#> <dbl> <dbl>
#> 1 1 2
This quote from https://cran.r-project.org/web/packages/dplyr/news.html explains the change:
bind_rows() and bind_cols() now accept vectors. They are treated as rows by the former and columns by the latter. Rows require inner names like c(col1 = 1, col2 = 2), while columns require outer names: col1 = c(1, 2). Lists are still treated as data frames but can be spliced explicitly with !!!, e.g. bind_rows(!!! x) (#1676).
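A tiny illustration of that inner-versus-outer-names distinction from the news item:
library(dplyr)
bind_rows(c(col1 = 1, col2 = 2))  # inner names -> one row with columns col1 and col2
bind_cols(col1 = c(1, 2))         # outer name  -> one column col1 with two rows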
With this change, it means that the following line in the use case example:
txt %>% map(read_xml) %>% map(xml_attrs) %>% map_df(~t(.) %>% as_tibble)
can be rewritten as
txt %>% map(read_xml) %>% map(xml_attrs) %>% map_df(bind_rows)
which is also equivalent to
txt %>% map(read_xml) %>% map(xml_attrs) %>% { bind_rows(!!! .) }
The equivalence of the different approaches is demonstrated in the following example:
library(tidyverse)
library(rvest)
txt <- c('<node a="1" b="2"></node>',
'<node a="1" c="3"></node>')
temp <- txt %>% map(read_xml) %>% map(xml_attrs)
# x, y, and z are identical
x <- temp %>% map_df(~t(.) %>% as_tibble)
y <- temp %>% map_df(bind_rows)
z <- bind_rows(!!! temp)
identical(x, y)
#> [1] TRUE
identical(y, z)
#> [1] TRUE
z
#> # A tibble: 2 x 3
#> a b c
#> <chr> <chr> <chr>
#> 1 1 2 <NA>
#> 2 1 <NA> 3
The idiomatic way would be to splice the vector with !!! within a tibble() call so the named vector elements become column definitions:
library(tibble)
vec <- c("a" = 1, "b" = 2)
tibble(!!!vec)
#> # A tibble: 1 x 2
#> a b
#> <dbl> <dbl>
#> 1 1 2
Created on 2019-09-14 by the reprex package (v0.3.0)
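Applied to the use-case example from the question, the same splice can be used inside map_df() (a sketch):
library(tidyverse)
library(rvest)
txt <- c('<node a="1" b="2"></node>',
         '<node a="1" c="3"></node>')
txt %>% map(read_xml) %>% map(xml_attrs) %>% map_df(~ tibble(!!!.x))
# missing attributes are filled with NA, as in the earlier output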
This works for me: c("a" = 1, "b" = 2) %>% t() %>% tbl_df() (note that tbl_df() is deprecated in current tibble versions; as_tibble() does the same here).
Interestingly you can use the as_tibble() method for lists to do this in one call. Note that this isn't best practice since this isn't an exported method.
tibble:::as_tibble.list(vec)
as_tibble(as.list(c(a=1, b=2)))
