Convert Stata 16 files to Stata 12 files using R

I am using RStudio (running R 4.0.1) and Stata 12 for Windows, and I have a large number of folders containing Stata 16 .dta files (along with other file types not relevant to this question). I want to create an automated process that converts all the Stata 16 .dta files into Stata 12 format (keeping all labels) so I can then analyze them.
Ideally, I want to keep the names of the original folders and files but save the converted versions into a new location.
This is what I have got so far:
setwd("C:/FilesLocation")
#vector with name of files to be converted
all_files <- list.files(pattern="*.dta",full.names = TRUE)
for (i in all_files){
#Load file to be converted into STATA12 version
data <- read_dta("filename.dta",
encoding = NULL,
col_select = NULL,
skip = 0,
n_max = Inf,
.name_repair = "unique")
#Write as .dta
write_dta(data,"c:/directory/filename.dta", version = 12, label = attr(data, "label"))
}
I am not sure this is the best approach. I know the commands inside the loop work for a single file, but I haven't been able to automate the process for all files.

Your code only needs some very minor modifications. I've indicated the changes (along with comments explaining them) in the snippet below.
library(haven)
mypath <- "C:/FilesLocation"
all_files <- list.files(path = mypath, pattern = "*.dta", full.names = TRUE)
for (i in seq_along(all_files)){
  # (Above) loop over indices so each file can be picked out by position
  # Load file to be converted into Stata 12 version
  data <- read_dta(all_files[i], # You want to read the ith element of all_files
                   encoding = NULL,
                   col_select = NULL,
                   skip = 0,
                   n_max = Inf,
                   .name_repair = "unique")
  # Add a _v12 suffix to the filename to
  # mark that it is version 12 now
  new_fname <- paste0(unlist(strsplit(basename(all_files[i]), "\\."))[1],
                      "_v12.", unlist(strsplit(basename(all_files[i]), "\\."))[2])
  # Write as .dta
  # with this new filename
  write_dta(data, path = paste0(mypath, "/", new_fname),
            version = 12, label = attr(data, "label"))
}
I tried this out with some .dta files from here, and the script ran without throwing errors. I haven't tested this on Windows, but in theory it should work fine.
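Since the question also asks to keep the original folder and file names while saving the converted copies to a new location, here is a minimal sketch of that variant (the source and destination paths are placeholders, and it assumes every .dta file in the tree is readable by haven):
library(haven)
src <- "C:/FilesLocation"   # original folder tree (placeholder)
dst <- "C:/ConvertedFiles"  # new location (placeholder)
# Recurse into subfolders; paths come back relative to src
all_files <- list.files(src, pattern = "\\.dta$", recursive = TRUE)
for (f in all_files) {
  out <- file.path(dst, f)  # same relative path under the new root
  dir.create(dirname(out), recursive = TRUE, showWarnings = FALSE)
  data <- read_dta(file.path(src, f))
  write_dta(data, out, version = 12, label = attr(data, "label"))
}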
Edit: here is a more complete solution with read_dta and write_dta wrapped into a single function dtavconv. This function also lets the user convert files to an arbitrary version number (the default is 12).
#----
# .dta file version conversion function
dtavconv <- function(mypath = NULL, myfile = NULL, myver = 12){
  # Function to convert .dta file versions
  # The default version files are converted to is v12
  # The default directory is whatever getwd() returns
  if(is.null(mypath)) mypath <- getwd()
  # Main code block wrapped in a tryCatch()
  myres <- tryCatch(
    {
      # Load the file to be converted
      data <- haven::read_dta(paste0(mypath, "/", myfile),
                              encoding = NULL,
                              col_select = NULL,
                              skip = 0,
                              n_max = Inf,
                              .name_repair = "unique")
      # Add a _v<myver> suffix to the filename to
      # mark which version it is now
      new_fname <- paste0(unlist(strsplit(basename(myfile), "\\."))[1],
                          "_v", myver, ".", unlist(strsplit(basename(myfile), "\\."))[2])
      # Write as .dta
      # with this new filename
      haven::write_dta(data, path = paste0(mypath, "/", new_fname),
                       version = myver, label = attr(data, "label"))
      message("\nSuccessfully converted ", myfile, " to ", new_fname, "\n")
    },
    error = function(cond){
      message("\n", cond, "\n")
      return(NA)
    }
  )
  return(myres)
}
#----
#----
The function can then be run on as many files as desired by invoking it via lapply or a for loop, as the example below illustrates:
#----
#Example run
library(haven)
#Set your path here below
mypath <- paste0(getwd(), "/", "dta")
#Check to see if this directory exists
#if not, create it
if(!dir.exists(mypath)) dir.create(mypath)
list.files(mypath)
# character(0)
#----
#Downloading some valid example files
myurl <- c("http://www.principlesofeconometrics.com/stata/airline.dta",
"http://www.principlesofeconometrics.com/stata/cola.dta")
lapply(myurl, function(x){ download.file (url = x, destfile = paste0(mypath, "/", basename(x)))})
#Also creating a negative test case
file.create(paste0(mypath, "/", "anegcase.dta"))
list.files(mypath)
# [1] "airline.dta" "anegcase.dta" "cola.dta"
#----
#Getting list of files in the directory
all_files <- list.files(path = mypath, pattern = "*.dta")
#Converting files using dtavconv via lapply
res <- lapply(all_files, dtavconv, mypath = mypath)
#
# Successfully converted airline.dta to airline_v12.dta
#
#
# Error in df_parse_dta_file(spec, encoding, cols_skip, n_max, skip,
# name_repair = .name_repair): Failed to parse /my/path/
# /dta/anegcase.dta: Unable to read from file.
#
#
#
# Successfully converted cola.dta to cola_v12.dta
#
list.files(mypath)
# [1] "airline_v12.dta" "airline.dta" "anegcase.dta" "cola_v12.dta"
# "cola.dta"
#Example for converting to version 14
res <- lapply(all_files, dtavconv, mypath = mypath, myver = 14)
#
# Successfully converted airline.dta to airline_v14.dta
#
#
# Error in df_parse_dta_file(spec, encoding, cols_skip, n_max, skip,
# name_repair = .name_repair): Failed to parse /my/path
# /dta/anegcase.dta: Unable to read from file.
#
#
#
# Successfully converted cola.dta to cola_v14.dta
#
list.files(mypath)
# [1] "airline_v12.dta" "airline_v14.dta" "airline.dta" "anegcase.dta"
# "cola_v12.dta" "cola_v14.dta" "cola.dta"
#----

Related

R: Importing file using rio and here packages in a nested function

I'm working on functions that take the character string argument GSE_expt. I have written 4 separate functions, each of which takes the argument GSE_expt and produces output that I can save as a variable in the R environment.
The code block below has 2 of those functions. I use the paste0 function with the variable GSE_expt to create a file name that the here and rio packages can use to import the file.
# Extracting metadata from 2 different sources and combining them into a single file
extract_metadata <- function(GSE_expt){
  GSE_expt <- deparse(substitute(GSE_expt)) # make sure it is a character string
  metadata_1 <- rnaseq_metadata_allsamples %>% # subset a larger metadata file
    as_tibble %>%
    dplyr::filter(GSE == GSE_expt)
  # metadata from ENA imported using rio and here packages
  metadata_2 <- import(here("metadata", "rnaseq", paste0(GSE_expt, ".txt"))) %>%
    as_tibble %>%
    select("run_accession", "library_layout", "library_strategy", "library_source",
           "read_count", "base_count", "sample_alias", "fastq_md5")
  metadata <- full_join(metadata_1, metadata_2, by = c("Run" = "run_accession"))
  return(metadata)
}
# Extracting coverage stats obtained from samtools
clean_extract_coverage <- function(GSE_expt){
  coverage <- read_tsv(file = here("results", "rnaseq", "2022-01-11", "coverage",
                                   paste0("coverage_stats_", deparse(substitute(GSE_expt)), "_percent.txt")),
                       col_names = FALSE)
  coverage <- data.frame("Run" = coverage$X1[c(TRUE, FALSE)],
                         "stats" = coverage$X1[c(FALSE, TRUE)])
  coverage <- separate(coverage, stats, into = c("num_reads", "covered_bases", "coverage_percent"), convert = TRUE)
  return(coverage)
}
The functions work fine on their own when I use GSE118008 as the argument GSE_expt.
I am trying to create a nested/combined function so that I can run GSE118008 through both (or more) functions at once and save the output as a list.
When I ran the nested/combined function,
extract_coverage_metadata <- function(GSE_expt){
  coverage <- clean_extract_coverage(GSE_expt)
  metadata <- extract_metadata(GSE_expt)
  return(metadata)
}
extract_coverage_metadata(GSE118008)
This is the error message I got.
Error: 'results/rnaseq/2022-01-11/coverage/coverage_stats_GSE_expt_percent.txt' does not exist.
Rather than creating the filename
coverage_stats_GSE118008_percent.txt
(which the individual function does fine), the combined function is unable to do so, and instead builds the filename coverage_stats_GSE_expt_percent.txt.
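As a minimal reproduction of the behaviour (with hypothetical function names): substitute() captures the expression supplied to the current call, so in a nested call it sees the inner function's argument symbol rather than the name typed at the top level.
inner <- function(x) deparse(substitute(x))
outer <- function(x) inner(x)
inner(GSE118008) # "GSE118008" -- works when called directly
outer(GSE118008) # "x" -- inner() sees the symbol that outer() passed along
One common fix is to deparse once at the top level and have the helper functions accept plain character strings.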
Traceback
8. stop("'", path, "' does not exist", if (!is_absolute_path(path)) { paste0(" in current working directory ('", getwd(), "')") }, ".", call. = FALSE)
7. check_path(path)
6. (function (path, write = FALSE) { if (is.raw(path)) { return(rawConnection(path, "rb")) ...
5. vroom_(file, delim = delim %||% col_types$delim, col_names = col_names, col_types = col_types, id = id, skip = skip, col_select = col_select, name_repair = .name_repair, na = na, quote = quote, trim_ws = trim_ws, escape_double = escape_double, escape_backslash = escape_backslash, ...
4. vroom::vroom(file, delim = "\t", col_names = col_names, col_types = col_types, col_select = { { col_select ...
3. read_tsv(file = here("results", "rnaseq", "2022-01-11", "coverage", paste0("coverage_stats_", deparse(substitute(GSE_expt)), "_percent.txt")), col_names = FALSE) at rnaseq_functions.R#30
2. clean_extract_coverage(GSE_expt)
1. extract_coverage_metadata(GSE118008)
I would appreciate any recommendations on how to solve this.
Thanks in advance!
Husain

How do I run a for loop on multiple word files in R

I have 44 doc files. From each file, I need to extract the customer name and amount. I am able to do this for one file using the read_document command and grep to extract the amount and customer name. When I do this for the 44 files, I get an error. Not sure where I am going wrong:
ls()
rm(list = ls())
files <- list.files("~/experiment", ".doc")
files
length(files)
for (i in length(files)){
  library(textreadr)
  read_document(files[i])
}
Here is the full code that I run on one file:
file <- "~/customer_full_file.docx"
library(textreadr)
full_customer_file <- read_document(file, skip = 0, remove.empty = TRUE, trim = TRUE)
#checking file is read correctly
head(full_customer_file)
tail(full_customer_file)
# Extracting Name
full_customer_file <- full_customer_file[c(1,4)]
amount_extract <- grep("Amount", full_customer_file, value = T)
library(tm)
require(stringr)
amount_extract_2 <- lapply(amount_extract, stripWhitespace)
amount_extract_2 <- str_remove(marks_extract_2, "Amount")
name_extract <- grep("Customer Name and ID: ", full_customer_file, value = T)
name_extract
name_extract_2 <- lapply(name_extract, stripWhitespace)
name_extract_2 <- str_remove(name_extract_2, "Customer Name and ID: ")
name_extract_2 <- as.data.frame(name_extract_2)
names(name_extract_2)[1] <- paste("customer_full_name")
amount_extract_2 <- as.data.frame(amount_extract_2)
names(amount_extract_2)[1] <- paste("amount")
amount_extract_2
customer_final_file <- cbind(name_extract_2, amount_extract_2)
write.table(customer_final_file, "~/customer_amount.csv", sep = ",", col.names = T, append = T)
Here is the code that I run on the 44 files:
ls()
rm(list = ls())
files <- list.files("~/experiment", ".doc")
files
length(files)
library(textreadr)
for (i in 1:length(files)){
  read_document(files[i])
}
Here is the error that I am getting:
> library(textreadr)
> for (i in 1:length(files)){
+ read_document(files[i])
+ }
Warning messages:
1: In utils::unzip(file, exdir = tmp) :
error 1 in extracting from zip file
2: In utils::unzip(file, exdir = tmp) :
error 1 in extracting from zip file
3: In utils::unzip(file, exdir = tmp) :
error 1 in extracting from zip file
4: In utils::unzip(file, exdir = tmp) :
error 1 in extracting from zip file
5: In utils::unzip(file, exdir = tmp) :
error 1 in extracting from zip file
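One likely cause of those warnings, as a hedged guess: list.files() returns bare file names unless full.names = TRUE, so read_document() looks for them in the current working directory rather than in ~/experiment, and the internal unzip step fails. The loop also discards each result instead of storing it. A minimal sketch of both fixes:
library(textreadr)
# Return full paths so read_document() can locate the files
files <- list.files("~/experiment", pattern = "\\.docx?$", full.names = TRUE)
# Keep each parsed document instead of discarding it
docs <- lapply(files, read_document)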
I can give you my code, which I used to analyze different word files with the sentimentr package in R. I guess you can use the same structure that I have and just change the body of the for loop to run your extraction for every docx.
And this is the code:
library(sentimentr)
library(officer) # provides read_docx() and docx_summary()
folder_path <- "C:\\Users\\yourname\\Documents\\R\\"
# Get a list of all the docx files in the folder
docx_files <- list.files(path = folder_path, pattern = "\\.docx$", full.names = TRUE)
# Create an empty data frame to store the results
results <- data.frame(file = character(0), sentiment = numeric(0))
# Loop over the list of files
for (file in docx_files) {
  # Read the docx file
  sample_data <- read_docx(file)
  # Extract the content and create a summary
  content <- docx_summary(sample_data)
  law <- content[sapply(strsplit(as.character(content$text), ""), length) > 5, ]
  # Calculate the sentiment of the summary (or in your case the extraction)
  sentiment <- sentiment_by(as.character(law$text))
  # Add a row to the data frame with the results for this file
  results <- rbind(results, data.frame(file = file, sentiment = sentiment$ave_sentiment))
}
# View the results data frame
View(results)
I hope that is close enough to your problem to solve it.

How to write a .dbf file

I'm encountering an issue with the script below. Everything works fine except for the final line, which results in an error.
# read dbf
library(foreign)
setwd("C:/Users/JGGliban/Desktop/Work/ADMIN/Other Stream/PH")
# Combine multiple dbf files
# library('tidyverse')
# List all files ending with dbf in the directory
dbf_files <- list.files(pattern = c("*.DBF", "*.dbf"), full.names = TRUE)
# Read each dbf file into a list
dbf_list <- lapply(dbf_files, read.dbf, as.is = FALSE)
# Concatenate the data in each dbf file into one combined data frame
data <- do.call(rbind, dbf_list)
# Write dbf file - max_nchar is the maximum number of characters allowed in a
# character field; anything longer is truncated
x <- write.dbf(data, file, factor2char = TRUE, max_nchar = 254)
Code modified to (passing an actual file name instead of the undefined object file):
x <- write.dbf(data, "file.dbf", factor2char = TRUE, max_nchar = 254)

R write.csv is creating an empty file

Some background for my question: This is an R script that a previous research assistant wrote, but he did not provide any guidance to me on using it for myself. After working through an R textbook, I attempted to use the code on my data files.
What this code is supposed to do is load multiple .csv files, delete certain items/columns from them, and then write the new cleaned .csv files to a specified directory.
Currently, the files are being created in the right directory with the right file names, but the .csv files being created are empty.
I am currently getting the following warning message:
Warning in fread(input = paste0("data/", str_match(pattern = "CAFAS|PECFAS", ...): Starting data input on line 2 and discarding line 1 because it has too few or too many items to be column names or data: (variable names).
This is my code:
library(data.table)
library(magrittr)
library(stringr)
# create a function to delete unnecessary variables from a CAFAS or PECFAS
# data set and save the reduced copy
del.items <- function(file){
  data <- fread(input = paste0("data/",
                               str_match(pattern = "CAFAS|PECFAS", string = file) %>% tolower,
                               "/raw/", file),
                sep = ",", header = TRUE, na.strings = "", stringsAsFactors = FALSE,
                skip = 0, colClasses = "character", data.table = FALSE)
  data <- data[-grep(pattern = "^(CA|PEC)FAS_E[0-9]+(TR?(Initial|[0-9]+|Exit)|SP[a-z])_(G|S|Item)[0-9]+$",
                     x = names(data))]
  write.csv(data,
            file = paste0("data/",
                          str_match(pattern = "CAFAS|PECFAS", string = file) %>% tolower,
                          "/items-del/",
                          sub(pattern = "ExportData_", x = file, replacement = "")) %>% tolower,
            row.names = FALSE)
}
# delete items from all cafas data sets
cafas.files <- list.files("data/cafas/raw", pattern = ".csv")
for (file in cafas.files){
  del.items(file)
}
# delete items from all pecfas data sets
pecfas.files <- list.files("data/pecfas/raw", pattern = ".csv")
for (file in pecfas.files){
  del.items(file)
}
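One plausible explanation for the empty files, offered as a hedged guess: the warning means fread() discarded the header row, so names(data) may not contain what the regex expects, and when grep() finds no match it returns integer(0), in which case data[-integer(0)] selects zero columns and write.csv() writes an empty file. A minimal guard inside del.items():
# grep() returns integer(0) when nothing matches, and
# data[-integer(0)] keeps zero columns -- producing an empty csv
item_regex <- "^(CA|PEC)FAS_E[0-9]+(TR?(Initial|[0-9]+|Exit)|SP[a-z])_(G|S|Item)[0-9]+$"
drop <- grep(pattern = item_regex, x = names(data))
if (length(drop) > 0) data <- data[-drop]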

Error in reading multiple text files from a directory in R

I would like to read multiple text files from my directory. The files are arranged in the following format:
regional_vol_GM_atlas1.txt
regional_vol_GM_atlas2.txt
........
regional_vol_GM_atlas152.txt
Data in the files looks like the following:
667869 667869
580083 580083
316133 316133
3631 3631
Following is the script that I have written:
library(readr)
library(stringr)
library(data.table)
array <- c()
# path to the directory where the .txt files are located
for (file in dir("/media/dev/Daten/Task1/subject1/t1"))
{
  row4 <- read.table(file = list.files(pattern = "regional_vol*.txt"),
                     header = FALSE,
                     row.names = NULL,
                     skip = 3,   # Skip the 1st 3 rows
                     nrows = 1,  # Read only the next row after skipping the 1st 3 rows
                     sep = "\t") # change the separator if it is not "\t"
  array <- cbind(array, row4)
}
I am getting the following error:
Error in file(file, "rt") : invalid 'description' argument
Kindly suggest where I went wrong in the script.
This seems to work fine for me. Make changes as per the code comments in case your files have headers:
[Answer Edited to reflect new information posted by OP]
# rm(list=ls()) # clean memory if you can afford to
mydir <- "~/Desktop/a" # change as per your path
# read full paths
myfiles <- list.files(mydir, pattern = "regional_vol*", full.names = TRUE)
myfiles # check that files are listed correctly
# initialise the dataframe from the first file
# change header = T/F depending on presence of header
# make sure sep is correct
df <- read.csv(myfiles[1], header = FALSE, skip = 0, nrows = 4, sep = "")[-c(1:3), ]
# check that the first line was read correctly
df
# read all the other files and update the dataframe
# we read 4 lines to read the header correctly, then remove 3
ans <- lapply(myfiles[-1], function(x){ read.csv(x, header = FALSE, skip = 0, nrows = 4, sep = "")[-c(1:3), ] })
ans
# update the dataframe
lapply(ans, function(x){ df <<- rbind(df, x) })
# this should be the required dataframe
df
Also, if you are on Linux, a much simpler method would be to make the OS do it for you:
awk 'FNR == 4' regional_vol*.txt
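For completeness, a loop-free R equivalent of the same idea (read only the 4th line of every file), as a minimal sketch assuming whitespace-separated files with no header:
myfiles <- list.files("~/Desktop/a", pattern = "regional_vol.*\\.txt$", full.names = TRUE)
# Skip the first 3 lines and read exactly one row from each file
rows <- lapply(myfiles, read.table, header = FALSE, skip = 3, nrows = 1)
df <- do.call(rbind, rows)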
This should do it for you.
# set the working directory (where the files are saved)
setwd("C:/Users/your_path_here/Desktop/")
file_names <- list.files(getwd())
# match the extension case-insensitively, since the files end in .txt
file_names <- file_names[grepl(".txt", file_names, ignore.case = TRUE)]
# print the file_names vector
file_names
# read one file, just for testing
# file <- read.csv("C:/Users/your_path_here/Desktop/regional_vol_GM_atlas1.txt", header = FALSE, stringsAsFactors = FALSE)
# see the data structure
# str(file)
# run read.csv on all values of file_names
files <- lapply(file_names, read.csv, header = FALSE, stringsAsFactors = FALSE)
files <- do.call(rbind, files)
# set column names
names(files) <- c("field1", "field2", "field3", "field4", "field5")
str(files)
write.table(files, "C:/Users/your_path_here/Desktop/mydata.txt", sep = "\t")
write.csv(files, "C:/Users/your_path_here/Desktop/mydata.csv")
