diff --git a/R/style-guides.R b/R/style-guides.R
index d6d4c9b6b..5e28997cb 100644
--- a/R/style-guides.R
+++ b/R/style-guides.R
@@ -175,35 +175,49 @@ tidyverse_style <- function(scope = "tokens",
   }

   subset_transformers <- list(
-    force_assignment_op = "EQ_ASSIGN",
-    add_line_break_after_pipe = "SPECIAL-PIPE",
-    wrap_if_else_while_for_fun_multi_line_in_curly = c("IF", "WHILE", "FOR", "FUNCTION"),
-    remove_line_breaks_in_fun_dec = "FUNCTION",
-    set_space_in_curly_curly = c("'{'", "'}'"),
-    set_space_between_eq_sub_and_comma = "EQ_SUB",
-    remove_space_around_colons = c("':'", "NS_GET_INT", "NS_GET"),
-    remove_space_after_fun_dec = "FUNCTION",
-    remove_space_before_dollar = "'$'",
-    set_space_after_bang_bang = "'!'",
-    remove_space_after_excl = "'!'",
-    style_space_around_tilde = "'~'",
-    add_space_after_for_if_while = c("IF", "WHILE", "FOR"),
-    set_line_break_around_curly_curly = "'{'",
-    indent_braces = c("'('", "'['", "'{'", "')'", "']'", "'}'"),
-    unindent_fun_dec = "FUNCTION",
-    indent_eq_sub = c("EQ_SUB", "EQ_FORMALS"), # TODO rename
-    update_indention_ref_fun_dec = "FUNCTION",
-    remove_space_before_closing_paren = c("')'", "']'"),
-    remove_space_before_opening_paren = c("'('", "'['", "LBB"),
-    remove_space_before_comma = "','",
-    style_space_around_math_token = c(
-      math_token_spacing$zero,
-      math_token_spacing$one
+    token = list(
+      resolve_semicolon = "';'",
+      add_brackets_in_pipe = "SPECIAL-PIPE",
+      force_assignment_op = c("token" = "EQ_ASSIGN"),
+      wrap_if_else_while_for_fun_multi_line_in_curly = c("IF", "WHILE", "FOR", "FUNCTION")
     ),
-    remove_space_after_opening_paren = c("'('", "'['", "LBB"),
-    start_comments_with_space = "COMMENT",
-    remove_line_break_before_round_closing_after_curly = "'}'",
-    style_line_break_around_curly = "'{'"
+    line_break = list(
+      set_line_break_before_curly_opening = "'{'",
+      remove_line_break_before_round_closing_after_curly = "'}'",
+      remove_line_breaks_in_fun_dec = "FUNCTION",
+      set_line_break_around_curly_curly = "'{'",
+      style_line_break_around_curly = "'{'",
+      add_line_break_after_pipe = "SPECIAL-PIPE"
+    ),
+    space = list(
+      remove_space_before_closing_paren = c("')'", "']'"),
+      remove_space_before_opening_paren = c("'('", "'['", "LBB"),
+      add_space_after_for_if_while = c("IF", "WHILE", "FOR"),
+      add_space_before_brace = "'{'",
+      remove_space_before_comma = "','",
+      set_space_between_eq_sub_and_comma = "EQ_SUB",
+      style_space_around_math_token = c(
+        math_token_spacing$zero,
+        math_token_spacing$one
+      ),
+      style_space_around_tilde = "'~'",
+      remove_space_after_opening_paren = c("'('", "'['", "LBB"),
+      remove_space_after_excl = "'!'",
+      set_space_after_bang_bang = "'!'",
+      remove_space_before_dollar = "'$'",
+      remove_space_after_fun_dec = "FUNCTION",
+      remove_space_around_colons = c("':'", "NS_GET_INT", "NS_GET"),
+      start_comments_with_space = "COMMENT",
+      remove_space_after_unary_pm_nested = c("'+'", "'-'"),
+      spacing_before_comments = "COMMENT",
+      set_space_in_curly_curly = c("'{'", "'}'")
+    ),
+    indent = list(
+      indent_braces = c("'('", "'['", "'{'", "')'", "']'", "'}'"),
+      unindent_fun_dec = "FUNCTION",
+      indent_eq_sub = c("EQ_SUB", "EQ_FORMALS"), # TODO rename
+      update_indention_ref_fun_dec = "FUNCTION"
+    )
   )

   style_guide_name <- "styler::tidyverse_style@https://github.com/r-lib"
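Note on usage: the scoped `subset_transformers` spec above is what lets rules be dropped per scope before styling. Third-party style guides can declare the same kind of spec. Below is a minimal sketch, modelled on the new test file further down and assuming the `subset_transformers` argument of `create_style_guide()` from this branch; the guide and rule names are illustrative only.

```r
# Sketch: a one-rule style guide whose only space rule is dropped whenever the
# code to be styled contains no "!" token.
library(styler)

remove_space_after_excl <- function(pd_flat) {
  excl <- (pd_flat$token == "'!'") &
    (pd_flat$token_after != "'!'") &
    (pd_flat$newlines == 0L)
  pd_flat$spaces[excl] <- 0L
  pd_flat
}

excl_guide <- create_style_guide(
  space = list(remove_space_after_excl = remove_space_after_excl),
  # scope -> rule -> tokens: if none of the listed tokens occur in the code,
  # the rule is removed from the guide before styling starts
  subset_transformers = list(
    space = list(remove_space_after_excl = "'!'")
  )
)

style_text("!  x", transformers = excl_guide)
```

When the text to style contains no `!` token, `remove_space_after_excl()` never runs; when it does, the rule behaves as before.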
diff --git a/R/transform-files.R b/R/transform-files.R
index 0bc84ad0e..4ee7bd883 100644
--- a/R/transform-files.R
+++ b/R/transform-files.R
@@ -250,15 +250,34 @@ parse_transform_serialize_r <- function(text,
   text_out
 }

-transformers_subset_impl <- function(x, token) {
-  if (!any(x %in% token)) {
-    x
+#' Remove transformers whose triggering tokens are absent
+#' @param transformers The full style guide, i.e. the result of
+#' [create_style_guide()].
+#' @param token Named list: names are the names of transformer rules, values are
+#' the tokens that trigger the rule; if none of them occur in `code`, it is removed.
+#' @param scope The low-level scope the rules belong to, e.g. 'token'.
+#' @param code Character vector of tokens from the tokenized code to be styled.
+transformers_subset_impl <- function(transformers, token, scope, code) {
+  transformer_names <- names(token)
+  for (i in seq_along(token)) {
+    if (!any(token[[i]] %in% code)) {
+      transformers[[scope]][[transformer_names[i]]] <- NULL
+    }
   }
+  transformers
 }

 #' Remove transformers that are not needed
+#'
 #' For every transformer, at least one token must be given to make subsetting.
 #' active.
+#' @param text Text to parse. Can also be the column `text` of the output of
+#' [compute_parse_data_nested()], where each element is a token (instead of a
+#' line).
+#' @return
+#' Returns `transformers`, with the rules that are not required for styling
+#' `text` removed.
+#' @keywords internal
 transformers_subset <- function(text, transformers) {
   is_colon <- text == ";"
   if (any(is_colon)) {
@@ -266,20 +285,12 @@ transformers_subset <- function(text, transformers) {
     # here since text is output of compute_parse_data_nested.
     text <- c(text[!is_colon], "1;")
   }
-  token <- unique(tokenize(text)$token)
-  to_remove <- purrr::map(
+  purrr::reduce2(
     transformers$subset_transformers,
-    transformers_subset_impl, token
-  ) %>%
-    compact() %>% # ise imap, return names directly, save compact()
-    names()
-  if (length(to_remove) > 0) {
-    for (scope in c("initialize", "line_break", "space", "token", "indention")) {
-      transformers[[scope]][to_remove] <- NULL
-      transformers[[scope]] <- purrr::compact(transformers[[scope]])
-    }
-  }
-  transformers
+    names(transformers$subset_transformers),
+    transformers_subset_impl, unique(tokenize(text)$token),
+    .init = transformers
+  )
 }

 #' Apply transformers to a parse table
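The rewritten `transformers_subset()` above replaces the map-then-compact approach with a single `purrr::reduce2()` fold: the accumulator is the style guide, `.x` iterates over the per-scope rule specs, `.y` over the scope names, and the tokens of the code are passed through `...`. A stripped-down illustration of that call pattern with toy data (the names below are invented for the example, not taken from styler):

```r
library(purrr)

# One fold step: drop every rule in `scope` whose trigger tokens are all
# absent from the code's tokens.
drop_unused <- function(guide, rules, scope, tokens) {
  for (rule in names(rules)) {
    if (!any(rules[[rule]] %in% tokens)) {
      guide[[scope]][[rule]] <- NULL
    }
  }
  guide
}

spec <- list(
  space = list(remove_space_after_excl = "'!'"),
  token = list(resolve_semicolon = "';'")
)
guide <- list(
  space = list(remove_space_after_excl = identity),
  token = list(resolve_semicolon = identity)
)

# The code to style contains a "!" but no ";": only the semicolon rule is
# dropped, the space rule survives.
tokens <- c("'!'", "SYMBOL", "NUM_CONST")
reduce2(spec, names(spec), drop_unused, tokens, .init = guide)
```

Because the guide itself is the accumulator, each scope is pruned in turn and the final value of the fold is the reduced guide.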
diff --git a/inst/WORDLIST b/inst/WORDLIST
index 23cda16a8..ea1adc5fe 100644
--- a/inst/WORDLIST
+++ b/inst/WORDLIST
@@ -68,6 +68,7 @@ href
 http
 https
 icloud
+impl
 infinitively
 initializer
 innode
@@ -190,6 +191,7 @@ tidyeval
 tidyr
 tidyverse
 Tidyverse
+tokenized
 travis
 tryCatch
 tryGugus
diff --git a/man/transformers_subset.Rd b/man/transformers_subset.Rd
index 07e2a3767..064718815 100644
--- a/man/transformers_subset.Rd
+++ b/man/transformers_subset.Rd
@@ -2,14 +2,21 @@
 % Please edit documentation in R/transform-files.R
 \name{transformers_subset}
 \alias{transformers_subset}
-\title{Remove transformers that are not needed
-For every transformer, at least one token must be given to make subsetting.
-active.}
+\title{Remove transformers that are not needed}
 \usage{
 transformers_subset(text, transformers)
 }
+\arguments{
+\item{text}{Text to parse. Can also be the column \code{text} of the output of
+\code{\link[=compute_parse_data_nested]{compute_parse_data_nested()}}, where each element is a token (instead of a
+line).}
+}
+\value{
+Returns \code{transformers}, with the rules that are not required for styling
+\code{text} removed.
+}
 \description{
-Remove transformers that are not needed
 For every transformer, at least one token must be given to make subsetting.
 active.
 }
+\keyword{internal}
diff --git a/man/transformers_subset_impl.Rd b/man/transformers_subset_impl.Rd
new file mode 100644
index 000000000..e43a2bc41
--- /dev/null
+++ b/man/transformers_subset_impl.Rd
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/transform-files.R
+\name{transformers_subset_impl}
+\alias{transformers_subset_impl}
+\title{Remove transformers whose triggering tokens are absent}
+\usage{
+transformers_subset_impl(transformers, token, scope, code)
+}
+\arguments{
+\item{transformers}{The full style guide, i.e. the result of
+\code{\link[=create_style_guide]{create_style_guide()}}.}
+
+\item{token}{Named list: names are the names of transformer rules, values are
+the tokens that trigger the rule; if none of them occur in \code{code}, it is removed.}
+
+\item{scope}{The low-level scope the rules belong to, e.g. 'token'.}
+
+\item{code}{Character vector of tokens from the tokenized code to be styled.}
+}
+\description{
+Remove transformers whose triggering tokens are absent
+}
diff --git a/tests/testthat/test-transformers-subset.R b/tests/testthat/test-transformers-subset.R
new file mode 100644
index 000000000..c78044fd1
--- /dev/null
+++ b/tests/testthat/test-transformers-subset.R
@@ -0,0 +1,69 @@
+# copied from remove_space_after_excl, repeated here for self-containment
+remove_space_after_excl_ <- function(pd_flat) {
+  excl <- (pd_flat$token == "'!'") &
+    (pd_flat$token_after != "'!'") &
+    (pd_flat$newlines == 0L)
+  pd_flat$spaces[excl] <- 0L
+  pd_flat
+}
+
+t <- create_style_guide(
+  space = lst(remove_space_after_excl_),
+  subset_transformers = list(space = list(remove_space_after_excl_ = c("'!'"))),
+)
+
+t_no_subset <- create_style_guide(
+  space = lst(remove_space_after_excl_),
+  subset_transformers = NULL,
+)
+
+t_empty_subset1 <- create_style_guide(
+  space = lst(remove_space_after_excl_),
+  subset_transformers = list(space = list()),
+)
+
+t_empty_subset2 <- create_style_guide(
+  space = lst(remove_space_after_excl_),
+  subset_transformers = list(),
+)
+
+test_that("transformers are not removed if they are used", {
+  t_new <- transformers_subset(
+    "!x", t
+  )
+  expect_equal(t_new, t)
+})
+
+test_that("transformers are removed if they are unused", {
+  t_fun <- transformers_subset(
+    "x", t
+  )
+  t_manual <- t
+  t_manual$space$remove_space_after_excl_ <- NULL
+  expect_equal(t_fun, t_manual)
+})
+
+
+test_that("if no subset_transformers is specified, no transformer is removed and no error issued", {
+  t_fun <- transformers_subset(
+    "x", t_no_subset
+  )
+  expect_equal(t_fun, t_no_subset)
+
+  t_fun <- transformers_subset(
+    "x", t_empty_subset1
+  )
+  expect_equal(t_fun, t_empty_subset1)
+
+  t_fun <- transformers_subset(
+    "x", t_empty_subset2
+  )
+  expect_equal(t_fun, t_empty_subset2)
+})
+
+test_that("semicolon is parsed without error", {
+  expect_equal(
+    transformers_subset(c("a", ";", "b"), t_no_subset),
+    t_no_subset
+  )
+})
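With this branch installed, the end-to-end effect can be inspected by subsetting the tidyverse guide directly. A sketch, assuming access to the internal helper via `:::`; the exact number of remaining rules depends on the guide's `subset_transformers` spec.

```r
guide <- styler::tidyverse_style()
slim <- styler:::transformers_subset("x <- 1", guide)

# Rules that have an entry in subset_transformers and whose trigger tokens do
# not occur in "x <- 1" (e.g. the "!"-related space rules) are gone; rules
# without an entry are always kept.
length(guide$space)
length(slim$space)
```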