From 3809424429d464eecb82e59f54a71976c9a89505 Mon Sep 17 00:00:00 2001
From: Adam Spannbauer
Date: Mon, 11 Dec 2017 18:28:13 -0500
Subject: [PATCH] rm magrittr as a dependency

---
 DESCRIPTION                              |  2 +-
 NAMESPACE                                |  1 -
 R/bind_lexrank.R                         |  5 ++++-
 R/lexRank.R                              |  1 -
 R/lexRankFromSimil.R                     |  1 -
 R/sentenceParse.R                        |  1 -
 R/sentenceSimil.R                        |  1 -
 R/sentenceTokenParse.R                   |  1 -
 R/tokenize.R                             |  1 -
 R/unnest_sentences.R                     |  5 ++++-
 man/bind_lexrank_.Rd                     |  5 ++++-
 man/unnest_sentences_.Rd                 |  5 ++++-
 tests/testthat/test-bind_lexrank.R       | 21 +++++++++------------
 tests/testthat/test-bind_lexrank_.R      | 21 +++++++++------------
 tests/testthat/test-sentenceTokenParse.R |  5 ++---
 15 files changed, 37 insertions(+), 39 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index e5cfdd5..b43f72a 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -10,7 +10,7 @@ URL: https://github.com/AdamSpannbauer/lexRankr/
 BugReports: https://github.com/AdamSpannbauer/lexRankr/issues/
 LazyData: TRUE
 RoxygenNote: 6.0.1
-Imports: magrittr, SnowballC, igraph, Rcpp
+Imports: SnowballC, igraph, Rcpp
 Depends: R (>= 2.10)
 LinkingTo: Rcpp
 Suggests: covr, testthat, R.rsp
diff --git a/NAMESPACE b/NAMESPACE
index 35a7b35..18d7761 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -11,7 +11,6 @@ export(tokenize)
 export(unnest_sentences)
 export(unnest_sentences_)
 importFrom(Rcpp,sourceCpp)
-importFrom(magrittr,"%>%")
 importFrom(stats,xtabs)
 importFrom(utils,combn)
 useDynLib(lexRankr)
diff --git a/R/bind_lexrank.R b/R/bind_lexrank.R
index 9549704..c0cdef9 100644
--- a/R/bind_lexrank.R
+++ b/R/bind_lexrank.R
@@ -13,7 +13,6 @@
 #' @param ... tokenizing options to be passed to lexRankr::tokenize. Ignored if \code{level} is "sentences"
 #' @return A dataframe with an additional column of lexrank scores (column is given name lexrank)
 #' @examples
-#' library(magrittr)
 #'
 #' df <- data.frame(doc_id = 1:3,
 #'                  text = c("Testing the system. Second sentence for you.",
@@ -21,6 +20,9 @@
 #'                           "Documents will be parsed and lexranked."),
 #'                  stringsAsFactors = FALSE)
 #'
+#' \dontrun{
+#' library(magrittr)
+#'
 #' df %>%
 #'   unnest_sentences(sents, text) %>%
 #'   bind_lexrank(sents, doc_id, level = "sentences")
@@ -42,6 +44,7 @@
 #'
 #' df %>%
 #'   bind_lexrank(tokens, doc_id, sent_id, level = 'tokens')
+#' }
 #' @export
 bind_lexrank_ <- function(tbl, text, doc_id, sent_id=NULL, level=c("sentences", "tokens"), threshold=.2, usePageRank=TRUE, damping=0.85, continuous=FALSE, ...) {
   if(!is.data.frame(tbl)) stop("tbl must be a dataframe")
diff --git a/R/lexRank.R b/R/lexRank.R
index c1a5dfc..94ff305 100644
--- a/R/lexRank.R
+++ b/R/lexRank.R
@@ -23,7 +23,6 @@
 #' lexRank(c("This is a test.","Tests are fun.",
 #'           "Do you think the exam will be hard?","Is an exam the same as a test?",
 #'           "How many questions are going to be on the exam?"))
-#' @importFrom magrittr "%>%"
 #' @export
 lexRank <- function(text, docId = "create", threshold=.2, n=3, returnTies=TRUE, usePageRank=TRUE, damping=0.85, continuous=FALSE, sentencesAsDocs=FALSE, removePunc=TRUE, removeNum=TRUE, toLower=TRUE, stemWords=TRUE, rmStopWords=TRUE, Verbose=TRUE){
diff --git a/R/lexRankFromSimil.R b/R/lexRankFromSimil.R
index 987121c..65261f4 100644
--- a/R/lexRankFromSimil.R
+++ b/R/lexRankFromSimil.R
@@ -14,7 +14,6 @@
 #' @references \url{http://www.cs.cmu.edu/afs/cs/project/jair/pub/volume22/erkan04a-html/erkan04a.html}
 #' @examples
 #' lexRankFromSimil(s1=c("d1_1","d1_1","d1_2"), s2=c("d1_2","d2_1","d2_1"), simil=c(.01,.03,.5))
-#' @importFrom magrittr "%>%"
 #' @export
diff --git a/R/sentenceParse.R b/R/sentenceParse.R
index 3de4108..baeebf5 100644
--- a/R/sentenceParse.R
+++ b/R/sentenceParse.R
@@ -8,7 +8,6 @@
 #' sentenceParse("Bill is trying to earn a Ph.D.", "You have to have a 5.0 GPA.")
 #' sentenceParse(c("Bill is trying to earn a Ph.D.", "You have to have a 5.0 GPA."),
 #'               docId=c("d1","d2"))
-#' @importFrom magrittr "%>%"
 #' @export
 sentenceParse <- function(text, docId = "create") {
diff --git a/R/sentenceSimil.R b/R/sentenceSimil.R
index c6ee1fb..82917de 100644
--- a/R/sentenceSimil.R
+++ b/R/sentenceSimil.R
@@ -16,7 +16,6 @@ NULL
 #'               sentenceId=c("d1_1","d1_1","d2_1","d2_1"),
 #'               token=c("i", "ran", "jane", "ran"))
 #' @importFrom utils combn
-#' @importFrom magrittr "%>%"
 #' @importFrom stats xtabs
 #' @export
diff --git a/R/sentenceTokenParse.R b/R/sentenceTokenParse.R
index c605f00..5b42df5 100644
--- a/R/sentenceTokenParse.R
+++ b/R/sentenceTokenParse.R
@@ -12,7 +12,6 @@
 #' @examples
 #' sentenceTokenParse(c("Bill is trying to earn a Ph.D.", "You have to have a 5.0 GPA."),
 #'                    docId=c("d1","d2"))
-#' @importFrom magrittr "%>%"
 #' @export
 sentenceTokenParse <- function(text, docId = "create", removePunc=TRUE, removeNum=TRUE, toLower=TRUE, stemWords=TRUE, rmStopWords=TRUE){
diff --git a/R/tokenize.R b/R/tokenize.R
index ab5c7db..47b0e2c 100644
--- a/R/tokenize.R
+++ b/R/tokenize.R
@@ -11,7 +11,6 @@ utils::globalVariables(c("smart_stopwords"))
 #' @examples
 #' tokenize("Mr. Feeny said the test would be on Sat. At least I'm 99.9% sure that's what he said.")
 #' tokenize("Bill is trying to earn a Ph.D. in his field.", rmStopWords=FALSE)
-#' @importFrom magrittr "%>%"
 #' @export
 tokenize <- function(text, removePunc=TRUE, removeNum=TRUE, toLower=TRUE, stemWords=TRUE, rmStopWords=TRUE){
diff --git a/R/unnest_sentences.R b/R/unnest_sentences.R
index c81c86c..4140428 100644
--- a/R/unnest_sentences.R
+++ b/R/unnest_sentences.R
@@ -9,7 +9,6 @@
 #' @param drop whether original input column should get dropped
 #' @return A data.frame of parsed sentences and sentence ids
 #' @examples
-#' library(magrittr)
 #'
 #' df <- data.frame(doc_id = 1:3,
 #'                  text = c("Testing the system. Second sentence for you.",
@@ -20,8 +19,12 @@
 #' unnest_sentences(df, sents, text)
 #' unnest_sentences_(df, "sents", "text")
 #'
+#' \dontrun{
+#' library(magrittr)
+#'
 #' df %>%
 #'   unnest_sentences(sents, text)
+#' }
 #' @export
 unnest_sentences_ <- function(tbl, output, input, doc_id=NULL, output_id="sent_id", drop=TRUE) {
diff --git a/man/bind_lexrank_.Rd b/man/bind_lexrank_.Rd
index a9a9e2b..32a80f6 100644
--- a/man/bind_lexrank_.Rd
+++ b/man/bind_lexrank_.Rd
@@ -41,7 +41,6 @@ A dataframe with an additional column of lexrank scores (column is given name le
 Bind lexrank scores to a dataframe of sentences or to a dataframe of tokens with sentence ids
 }
 \examples{
-library(magrittr)
 
 df <- data.frame(doc_id = 1:3,
                  text = c("Testing the system. Second sentence for you.",
@@ -49,6 +48,9 @@ df <- data.frame(doc_id = 1:3,
                           "Documents will be parsed and lexranked."),
                  stringsAsFactors = FALSE)
 
+\dontrun{
+library(magrittr)
+
 df \%>\%
   unnest_sentences(sents, text) \%>\%
   bind_lexrank(sents, doc_id, level = "sentences")
@@ -71,3 +73,4 @@ df <- data.frame(doc_id = c(1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
 df \%>\%
   bind_lexrank(tokens, doc_id, sent_id, level = 'tokens')
 }
+}
diff --git a/man/unnest_sentences_.Rd b/man/unnest_sentences_.Rd
index 6977cad..31e408c 100644
--- a/man/unnest_sentences_.Rd
+++ b/man/unnest_sentences_.Rd
@@ -31,7 +31,6 @@ A data.frame of parsed sentences and sentence ids
 Split a column of text into sentences
 }
 \examples{
-library(magrittr)
 
 df <- data.frame(doc_id = 1:3,
                  text = c("Testing the system. Second sentence for you.",
@@ -42,6 +41,10 @@ df <- data.frame(doc_id = 1:3,
 unnest_sentences(df, sents, text)
 unnest_sentences_(df, "sents", "text")
 
+\dontrun{
+library(magrittr)
+
 df \%>\%
   unnest_sentences(sents, text)
 }
+}
diff --git a/tests/testthat/test-bind_lexrank.R b/tests/testthat/test-bind_lexrank.R
index 5179802..98115e8 100644
--- a/tests/testthat/test-bind_lexrank.R
+++ b/tests/testthat/test-bind_lexrank.R
@@ -8,17 +8,15 @@ test_that("correct ouput class and str", {
                             "Documents will be parsed and lexranked."),
                    stringsAsFactors = FALSE)
 
-  test_result <- df %>%
-    unnest_sentences(sents, text) %>%
-    bind_lexrank(sents, doc_id, level = 'sentences')
+  test_result <- unnest_sentences(df, sents, text)
+  test_result <- bind_lexrank(test_result, sents, doc_id, level = 'sentences')
 
   expect_equal(dim(test_result), c(4,4))
   expect_true(is.data.frame(test_result))
   expect_equal(names(test_result), c("doc_id","sent_id","sents","lexrank"))
 
-  test_result <- df %>%
-    unnest_sentences(sents, text, drop=FALSE) %>%
-    bind_lexrank(sents,doc_id, level = 'sentences')
+  test_result <- unnest_sentences(df, sents, text, drop=FALSE)
+  test_result <- bind_lexrank(test_result, sents,doc_id, level = 'sentences')
 
   expect_equal(dim(test_result), c(4,5))
   expect_equal(names(test_result), c("doc_id","text","sent_id","sents","lexrank"))
@@ -35,8 +33,7 @@ test_that("correct ouput class and str", {
                               "tidy", "documents", "df", "documents", "will", "be", "parsed", "and", "lexranked"),
                    stringsAsFactors = FALSE)
 
-  test_result <- df %>%
-    bind_lexrank(tokens, doc_id, sent_id, "tokens")
+  test_result <- bind_lexrank(df, tokens, doc_id, sent_id, "tokens")
 
   expect_equal(dim(test_result), c(19,5))
   expect_equal(names(test_result), c("doc_id","sent_id","sents","tokens","lexrank"))
@@ -48,8 +45,8 @@ test_that("test input checking", {
                    text = c("Testing the system. Second sentence for you.",
                             "System testing the tidy documents df.",
                             "Documents will be parsed and lexranked."),
-                   stringsAsFactors = FALSE) %>%
-    unnest_sentences(sents, text)
+                   stringsAsFactors = FALSE)
+  df <- unnest_sentences(df, sents, text)
 
   expect_error(bind_lexrank(df, sents, fake))
   expect_error(bind_lexrank(NULL, sents, doc_id))
@@ -79,8 +76,8 @@ test_that("output value", {
                    text = c("Testing the system. Second sentence for you.",
                             "System testing the tidy documents df.",
                             "Documents will be parsed and lexranked."),
-                   stringsAsFactors = FALSE) %>%
-    unnest_sentences(sents, text)
+                   stringsAsFactors = FALSE)
+  df <- unnest_sentences(df, sents, text)
 
   test_result <- bind_lexrank(df, sents, doc_id, level="sentences")
   expected_result <- data.frame(doc_id = c(1L, 1L, 2L, 3L),
diff --git a/tests/testthat/test-bind_lexrank_.R b/tests/testthat/test-bind_lexrank_.R
index 524ec4b..3d4800a 100644
--- a/tests/testthat/test-bind_lexrank_.R
+++ b/tests/testthat/test-bind_lexrank_.R
@@ -8,17 +8,15 @@ test_that("correct ouput class and str", {
                             "Documents will be parsed and lexranked."),
                    stringsAsFactors = FALSE)
 
-  test_result <- df %>%
-    unnest_sentences(sents, text) %>%
-    bind_lexrank_("sents", "doc_id", level = 'sentences')
+  test_result <- unnest_sentences(df, sents, text)
+  test_result <- bind_lexrank_(test_result, "sents", "doc_id", level = 'sentences')
 
   expect_equal(dim(test_result), c(4,4))
   expect_true(is.data.frame(test_result))
   expect_equal(names(test_result), c("doc_id","sent_id","sents","lexrank"))
 
-  test_result <- df %>%
-    unnest_sentences(sents, text, drop=FALSE) %>%
-    bind_lexrank_("sents", "doc_id", level = 'sentences')
+  test_result <- unnest_sentences(df, sents, text, drop=FALSE)
+  test_result <- bind_lexrank_(test_result, "sents", "doc_id", level = 'sentences')
 
   expect_equal(dim(test_result), c(4,5))
   expect_equal(names(test_result), c("doc_id","text","sent_id","sents","lexrank"))
@@ -35,8 +33,7 @@ test_that("correct ouput class and str", {
                               "tidy", "documents", "df", "documents", "will", "be", "parsed", "and", "lexranked"),
                    stringsAsFactors = FALSE)
 
-  test_result <- df %>%
-    bind_lexrank_("tokens", "doc_id", "sent_id", "tokens")
+  test_result <- bind_lexrank_(df, "tokens", "doc_id", "sent_id", "tokens")
 
   expect_equal(dim(test_result), c(19,5))
   expect_equal(names(test_result), c("doc_id","sent_id","sents","tokens","lexrank"))
@@ -48,8 +45,8 @@ test_that("test input checking", {
                    text = c("Testing the system. Second sentence for you.",
                             "System testing the tidy documents df.",
                             "Documents will be parsed and lexranked."),
-                   stringsAsFactors = FALSE) %>%
-    unnest_sentences(sents, text)
+                   stringsAsFactors = FALSE)
+  df <- unnest_sentences(df, sents, text)
 
   expect_error(bind_lexrank_(df, "sents", "fake"))
   expect_error(bind_lexrank_(NULL, "sents", "doc_id"))
@@ -79,8 +76,8 @@ test_that("output value", {
                    text = c("Testing the system. Second sentence for you.",
                             "System testing the tidy documents df.",
                             "Documents will be parsed and lexranked."),
-                   stringsAsFactors = FALSE) %>%
-    unnest_sentences(sents, text)
+                   stringsAsFactors = FALSE)
+  df <- unnest_sentences(df, sents, text)
 
   test_result <- bind_lexrank_(df, "sents", "doc_id", level="sentences")
   expected_result <- data.frame(doc_id = c(1L, 1L, 2L, 3L),
diff --git a/tests/testthat/test-sentenceTokenParse.R b/tests/testthat/test-sentenceTokenParse.R
index a0317f8..a37abb4 100644
--- a/tests/testthat/test-sentenceTokenParse.R
+++ b/tests/testthat/test-sentenceTokenParse.R
@@ -28,9 +28,8 @@ test_that("All clean options TRUE", {
                                    rmStopWords=TRUE)
 
   expectedResultSentences <- sentenceParse(testDocs)
-  expectedResultTokens <- lexRankr::tokenize(testDocs) %>%
-    unlist() %>%
-    .[which(!is.na(.))]
+  expectedResultTokens <- unlist(lexRankr::tokenize(testDocs))
+  expectedResultTokens <- expectedResultTokens[which(!is.na(expectedResultTokens))]
 
   expect_equal(testResult$sentences, expectedResultSentences)
   expect_equal(testResult$tokens$token, expectedResultTokens)
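
The short R sketch below is not part of the patch; it illustrates the pipe-free call pattern the commit adopts throughout (plain intermediate assignments instead of magrittr's %>%). The data frame and the unnest_sentences()/bind_lexrank() calls are taken directly from the examples and tests in the diff above; the only assumption is that the lexRankr package is installed, and the intermediate object names (sent_df, lexrank_df) are illustrative.

library(lexRankr)

df <- data.frame(doc_id = 1:3,
                 text = c("Testing the system. Second sentence for you.",
                          "System testing the tidy documents df.",
                          "Documents will be parsed and lexranked."),
                 stringsAsFactors = FALSE)

# Previously (required magrittr):
#   df %>%
#     unnest_sentences(sents, text) %>%
#     bind_lexrank(sents, doc_id, level = "sentences")

# Pipe-free equivalent: each pipeline step becomes a plain assignment.
sent_df <- unnest_sentences(df, sents, text)                           # one row per parsed sentence
lexrank_df <- bind_lexrank(sent_df, sents, doc_id, level = "sentences")  # adds a lexrank score column
lexrank_df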