From 73162992e16d56b46f57c52f2146e9c058c0eec7 Mon Sep 17 00:00:00 2001
From: Pachamaltese
Date: Mon, 10 May 2021 23:38:51 -0400
Subject: [PATCH 1/2] Update pkg.R

---
 R/pkg.R | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/R/pkg.R b/R/pkg.R
index 35363e9..ffc0e1d 100644
--- a/R/pkg.R
+++ b/R/pkg.R
@@ -2,7 +2,7 @@
 #'
 #' R is a great tool, but processing large text files with data is cumbersome.
 #' \code{chunked} helps you to process large text files with dplyr while loading
-#' only a part of the data in memory. It builds on the execellent R package LaF
+#' only a part of the data in memory. It builds on the excellent R package LaF
 #' Processing commands are writing in dplyr syntax, and \code{chunked}
 #' (using \code{LaF})
 #' will take care that chunk by chunk is processed, taking far less memory

From c2aebd4864804b618a2477f83a554b29624f1a4f Mon Sep 17 00:00:00 2001
From: Pachamaltese
Date: Mon, 10 May 2021 23:57:27 -0400
Subject: [PATCH 2/2] Update write.R

---
 R/write.R | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/R/write.R b/R/write.R
index 414fe3a..2ee5c2e 100644
--- a/R/write.R
+++ b/R/write.R
@@ -59,7 +59,7 @@ write_table_chunkwise <- function(x, file="", sep="\t", dec=".", col.names=TRUE,
   write_csv_chunkwise(x=x, file=file, sep=sep, dec=dec, col.names=col.names,
                       row.names=row.names, ...)
 }
-#' Genereric function to write chunk by chunk
+#' Generic function to write chunk by chunk
 #' @export
 #' @param x chunked input, e.g. created with \code{read_chunkwise} or it can be a \code{tbl_sql} object.
 #' @param dest where should the data be written. May be a character or
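
For context on the documentation fixed in the first patch: the workflow it describes is a lazy read-process-write pipeline. Below is a minimal sketch; read_chunkwise and write_chunkwise are the entry points named in the package docs above, while the file paths, chunk size, and column name are made up for illustration.

    library(chunked)
    library(dplyr)

    # Declares a chunkwise reader; no data is loaded yet. Chunks of
    # 5000 rows are pulled through LaF only when the result is written.
    read_chunkwise("big_input.csv", chunk_size = 5000) %>%
      filter(!is.na(amount)) %>%            # dplyr verbs are applied per chunk
      mutate(amount_log = log(amount)) %>%
      write_chunkwise("big_output.csv")

Because only one chunk is held in memory at a time, this can process files far larger than available RAM.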
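
The write.R documentation touched by the second patch belongs to the generic write_chunkwise, whose x argument, per the docs above, may also be a tbl_sql object and whose dest may be a character path. A sketch under the assumption of a DBI/RSQLite connection; the database file, table, and column names are hypothetical.

    library(chunked)
    library(dplyr)

    con <- DBI::dbConnect(RSQLite::SQLite(), "events.db")
    events <- tbl(con, "events")    # a tbl_sql object

    # Stream the query result to a file chunk by chunk instead of
    # collecting it into memory first.
    events %>%
      filter(status == "ok") %>%
      write_chunkwise(dest = "events_ok.csv")

    DBI::dbDisconnect(con)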