Add fast-path for comment detection #9808

Merged · 1 commit · Feb 5, 2024
17 changes: 14 additions & 3 deletions crates/ruff_linter/src/rules/eradicate/detection.rs
@@ -1,14 +1,16 @@
 /// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py)
 use aho_corasick::AhoCorasick;
+use itertools::Itertools;
 use once_cell::sync::Lazy;
 use regex::{Regex, RegexSet};
 
 use ruff_python_parser::parse_suite;
+use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
+use ruff_text_size::TextSize;
 
 static CODE_INDICATORS: Lazy<AhoCorasick> = Lazy::new(|| {
     AhoCorasick::new([
-        "(", ")", "[", "]", "{", "}", ":", "=", "%", "print", "return", "break", "continue",
-        "import",
+        "(", ")", "[", "]", "{", "}", ":", "=", "%", "return", "break", "continue", "import",
     ])
     .unwrap()
 });
@@ -44,6 +46,14 @@ pub(crate) fn comment_contains_code(line: &str, task_tags: &[String]) -> bool {
         return false;
     }
 
+    // Fast path: if the comment contains consecutive identifiers, we know it won't parse.
+    let tokenizer = SimpleTokenizer::starts_at(TextSize::default(), line).skip_trivia();
+    if tokenizer.tuple_windows().any(|(first, second)| {
+        first.kind == SimpleTokenKind::Name && second.kind == SimpleTokenKind::Name
+    }) {
+        return false;
+    }
+
     // Ignore task tag comments (e.g., "# TODO(tom): Refactor").
     if line
         .split(&[' ', ':', '('])
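The fast path rests on a simple observation: two identifier tokens in a row, separated only by trivia (as in `to print`), can never begin a valid Python statement, so such comments can be dismissed without invoking the parser at all. Below is a minimal self-contained sketch of the same heuristic, using a hypothetical hand-rolled scanner in place of ruff's `SimpleTokenizer` (note that the real tokenizer classifies keywords separately from `Name`, so a comment like `# import os` is not rejected by this check):

    /// Hypothetical stand-in for the fast path above: report whether two
    /// identifier-like words appear back to back, separated only by whitespace.
    fn has_consecutive_identifiers(line: &str) -> bool {
        let mut prev_was_name = false;
        let mut chars = line.chars().peekable();
        while let Some(c) = chars.next() {
            if c.is_alphabetic() || c == '_' {
                // Consume the rest of the word.
                while matches!(chars.peek(), Some(&n) if n.is_alphanumeric() || n == '_') {
                    chars.next();
                }
                if prev_was_name {
                    return true; // e.g. `to print`: cannot start a Python statement
                }
                prev_was_name = true;
            } else if !c.is_whitespace() {
                // Punctuation breaks the run, so `print(1)` is not two names.
                prev_was_name = false;
            }
        }
        false
    }

    fn main() {
        assert!(has_consecutive_identifiers("to print")); // prose
        assert!(!has_consecutive_identifiers("print(1)")); // plausible code
        assert!(!has_consecutive_identifiers("x = 1")); // plausible code
    }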
@@ -123,9 +133,10 @@ mod tests {
 
     #[test]
     fn comment_contains_code_with_print() {
-        assert!(comment_contains_code("#print", &[]));
         assert!(comment_contains_code("#print(1)", &[]));
 
+        assert!(!comment_contains_code("#print", &[]));
+        assert!(!comment_contains_code("#print 1", &[]));
         assert!(!comment_contains_code("#to print", &[]));
     }

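The flipped assertions follow from dropping "print" from `CODE_INDICATORS`: a comment containing none of the indicator substrings is rejected before parsing is even attempted, so a bare `#print` now reads as prose while `#print(1)` still trips the `(` indicator. A small sketch of that gate against the post-PR list, using the same `aho_corasick` calls as the diff:

    use aho_corasick::AhoCorasick;

    fn main() {
        // Indicator list as it stands after this PR: "print" is gone, so only
        // structural characters and statement keywords mark a comment as code-like.
        let indicators = AhoCorasick::new([
            "(", ")", "[", "]", "{", "}", ":", "=", "%", "return", "break", "continue", "import",
        ])
        .unwrap();

        assert!(indicators.is_match("print(1)")); // `(` matches: worth parsing
        assert!(!indicators.is_match("print 1")); // no match: treat as prose
    }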
@@ -4,7 +4,7 @@ expression: test_case.tokens()
 ---
 [
     SimpleToken {
-        kind: Other,
+        kind: Name,
         range: 0..2,
     },
 ]
@@ -0,0 +1,18 @@
+---
+source: crates/ruff_python_trivia/src/tokenizer.rs
+expression: test_case.tokens()
+---
+[
+    SimpleToken {
+        kind: Name,
+        range: 0..3,
+    },
+    SimpleToken {
+        kind: Whitespace,
+        range: 3..4,
+    },
+    SimpleToken {
+        kind: Name,
+        range: 4..7,
+    },
+]
@@ -0,0 +1,14 @@
+---
+source: crates/ruff_python_trivia/src/tokenizer.rs
+expression: test_case.tokens()
+---
+[
+    SimpleToken {
+        kind: Other,
+        range: 0..2,
+    },
+    SimpleToken {
+        kind: Bogus,
+        range: 2..7,
+    },
+]
@@ -0,0 +1,18 @@
+---
+source: crates/ruff_python_trivia/src/tokenizer.rs
+expression: test_case.tokens()
+---
+[
+    SimpleToken {
+        kind: Name,
+        range: 0..3,
+    },
+    SimpleToken {
+        kind: Other,
+        range: 3..4,
+    },
+    SimpleToken {
+        kind: Bogus,
+        range: 4..8,
+    },
+]
@@ -0,0 +1,14 @@
+---
+source: crates/ruff_python_trivia/src/tokenizer.rs
+expression: test_case.tokens()
+---
+[
+    SimpleToken {
+        kind: Other,
+        range: 0..1,
+    },
+    SimpleToken {
+        kind: Bogus,
+        range: 1..6,
+    },
+]
@@ -4,7 +4,7 @@ expression: test_case.tokens()
 ---
 [
     SimpleToken {
-        kind: Other,
+        kind: Name,
         range: 0..6,
     },
 ]
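The files above are insta snapshots: each new tokenizer test calls `assert_debug_snapshot!(test_case.tokens())`, and insta diffs the `Debug` output against the stored `.snap` file. A minimal standalone example of the pattern (a hypothetical test, not from this PR):

    use insta::assert_debug_snapshot;

    #[test]
    fn example_snapshot() {
        // insta serializes the Debug representation to a .snap file on first
        // run; `cargo insta review` is then used to accept or reject changes.
        assert_debug_snapshot!(vec![1, 2, 3]);
    }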
80 changes: 77 additions & 3 deletions crates/ruff_python_trivia/src/tokenizer.rs
@@ -182,7 +182,7 @@ fn to_keyword_or_other(source: &str) -> SimpleTokenKind {
         "case" => SimpleTokenKind::Case,
         "with" => SimpleTokenKind::With,
         "yield" => SimpleTokenKind::Yield,
-        _ => SimpleTokenKind::Other, // Potentially an identifier, but only if it isn't a string prefix. We can ignore this for now https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
+        _ => SimpleTokenKind::Name, // Potentially an identifier, but only if it isn't a string prefix. The caller (SimpleTokenizer) is responsible for enforcing that constraint.
     }
 }

@@ -467,6 +467,9 @@ pub enum SimpleTokenKind {
     /// `yield`
     Yield,
 
+    /// An identifier or keyword.
+    Name,
+
     /// Any other non trivia token.
     Other,

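Keeping `Name` distinct from the keyword kinds is what makes the eradicate fast path precise: in `import os`, only `os` is a `Name`, so the consecutive-`Name` check does not fire. A condensed illustration of the classification (a hypothetical mini enum, not ruff's full keyword table):

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum MiniTokenKind {
        Import,
        Return,
        /// Any word that is not in the keyword table.
        Name,
    }

    /// Condensed analogue of `to_keyword_or_other` (illustrative subset only).
    fn classify(word: &str) -> MiniTokenKind {
        match word {
            "import" => MiniTokenKind::Import,
            "return" => MiniTokenKind::Return,
            // The fallback used to be `Other`; it is now `Name`, and the
            // tokenizer decides whether the word is actually a string prefix.
            _ => MiniTokenKind::Name,
        }
    }

    fn main() {
        let kinds: Vec<_> = "import os".split_whitespace().map(classify).collect();
        assert_eq!(kinds, [MiniTokenKind::Import, MiniTokenKind::Name]);
    }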
@@ -566,10 +569,42 @@ impl<'a> SimpleTokenizer<'a> {
                 let range = TextRange::at(self.offset, token_len);
                 let kind = to_keyword_or_other(&self.source[range]);
 
-                if kind == SimpleTokenKind::Other {
+                // If the next character is a quote, we may be in a string prefix. For example:
+                // `f"foo`.
+                if kind == SimpleTokenKind::Name
+                    && matches!(self.cursor.first(), '"' | '\'')
+                    && matches!(
+                        &self.source[range],
+                        "B" | "BR"
+                            | "Br"
+                            | "F"
+                            | "FR"
+                            | "Fr"
+                            | "R"
+                            | "RB"
+                            | "RF"
+                            | "Rb"
+                            | "Rf"
+                            | "U"
+                            | "b"
+                            | "bR"
+                            | "br"
+                            | "f"
+                            | "fR"
+                            | "fr"
+                            | "r"
+                            | "rB"
+                            | "rF"
+                            | "rb"
+                            | "rf"
+                            | "u"
+                    )
+                {
                     self.bogus = true;
+                    SimpleTokenKind::Other
+                } else {
+                    kind
                 }
-                kind
             }
 
             // Space, tab, or form feed. We ignore the true semantics of form feed, and treat it as
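The 24-entry `matches!` list spells out every legal casing of Python's string-prefix letters: `b`, `r`, `f`, `u`, plus the combinable pairs `br`/`rb` and `fr`/`rf` in each case mix (`u` combines with nothing, which is why `ur` is absent). An equivalent, more compact predicate lowercases first; this is a hypothetical helper, not the code ruff uses, since the literal list avoids the allocation that `to_ascii_lowercase` incurs:

    /// True if `word` is a valid Python string prefix in any casing.
    /// Lowercasing collapses the 24 literal spellings down to 8.
    fn is_string_prefix(word: &str) -> bool {
        matches!(
            word.to_ascii_lowercase().as_str(),
            "b" | "r" | "f" | "u" | "br" | "rb" | "fr" | "rf"
        )
    }

    fn main() {
        assert!(is_string_prefix("f")); // f"..."
        assert!(is_string_prefix("BR")); // BR'...'
        assert!(!is_string_prefix("abc")); // `abc'foo'` is a name, then a string
        assert!(!is_string_prefix("ur")); // `ur"..."` is not valid in Python 3
    }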
@@ -1153,6 +1188,45 @@ mod tests {
         test_case.assert_reverse_tokenization();
     }
 
+    #[test]
+    fn string_with_kind() {
+        let source = "f'foo'";
+
+        let test_case = tokenize(source);
+        assert_debug_snapshot!(test_case.tokens());
+
+        // note: not reversible: [other, bogus] vs [bogus, other]
+    }
+
+    #[test]
+    fn string_with_byte_kind() {
+        let source = "BR'foo'";
+
+        let test_case = tokenize(source);
+        assert_debug_snapshot!(test_case.tokens());
+
+        // note: not reversible: [other, bogus] vs [bogus, other]
+    }
+
+    #[test]
+    fn string_with_invalid_kind() {
+        let source = "abc'foo'";
+
+        let test_case = tokenize(source);
+        assert_debug_snapshot!(test_case.tokens());
+
+        // note: not reversible: [other, bogus] vs [bogus, other]
+    }
+
+    #[test]
+    fn identifier_starting_with_string_kind() {
+        let source = "foo bar";
+
+        let test_case = tokenize(source);
+        assert_debug_snapshot!(test_case.tokens());
+        test_case.assert_reverse_tokenization();
+    }
+
     #[test]
     fn ignore_word_with_only_id_continuing_chars() {
         let source = "555";