diff --git a/R/pkg.R b/R/pkg.R
index 35363e9..ffc0e1d 100644
--- a/R/pkg.R
+++ b/R/pkg.R
@@ -2,7 +2,7 @@
 #'
 #' R is a great tool, but processing large text files with data is cumbersome.
 #' \code{chunked} helps you to process large text files with dplyr while loading
-#' only a part of the data in memory. It builds on the execellent R package LaF
+#' only a part of the data in memory. It builds on the excellent R package LaF
-#' Processing commands are writing in dplyr syntax, and \code{chunked}
+#' Processing commands are written in dplyr syntax, and \code{chunked}
 #' (using \code{LaF})
 #' will take care that chunk by chunk is processed, taking far less memory
diff --git a/R/write.R b/R/write.R
index 414fe3a..2ee5c2e 100644
--- a/R/write.R
+++ b/R/write.R
@@ -59,7 +59,7 @@ write_table_chunkwise <- function(x, file="", sep="\t", dec=".", col.names=TRUE,
   write_csv_chunkwise(x=x, file=file, sep=sep, dec=dec, col.names=col.names, row.names=row.names, ...)
 }
-#' Genereric function to write chunk by chunk
+#' Generic function to write chunk by chunk
 #' @export
 #' @param x chunked input, e.g. created with \code{read_chunkwise} or it can be a \code{tbl_sql} object.
 #' @param dest where should the data be written. May be a character or