Modernize Syntax for python 3 (#206)
* Remove u"strings" that are no longer needed in python 3.

* Remove old-style super syntax.
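Both cleanups are purely mechanical. A minimal before/after sketch of the pattern this commit applies throughout the package (the Base/OldStyle/NewStyle classes below are hypothetical illustrations, not parsimonious code):

# In Python 3, str literals are Unicode by default, so the u'' prefix is redundant,
# and super() may be called with zero arguments inside a method.

class Base:
    def __init__(self, name=''):
        self.name = name

class OldStyle(Base):  # Python 2-compatible spellings (before)
    def __init__(self, literal, name=''):
        super(OldStyle, self).__init__(name)   # old-style super repeats the class
        self.literal = literal

    def __str__(self):
        return u'<%s %s>' % (self.__class__.__name__, self.literal)  # redundant u''

class NewStyle(Base):  # Python 3-only spellings (after)
    def __init__(self, literal, name=''):
        super().__init__(name)                 # zero-argument super
        self.literal = literal

    def __str__(self):
        return '<%s %s>' % (self.__class__.__name__, self.literal)

assert str(OldStyle('中文')) == '<OldStyle 中文>'
assert str(NewStyle('中文')) == '<NewStyle 中文>'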
lucaswiman authored Apr 19, 2022
1 parent 6d684ea commit 1fdd869
Showing 7 changed files with 33 additions and 33 deletions.
10 changes: 5 additions & 5 deletions parsimonious/exceptions.py
@@ -14,9 +14,9 @@ def __init__(self, text, pos=-1, expr=None):
         self.expr = expr
 
     def __str__(self):
-        rule_name = ((u"'%s'" % self.expr.name) if self.expr.name else
+        rule_name = (("'%s'" % self.expr.name) if self.expr.name else
                      str(self.expr))
-        return u"Rule %s didn't match at '%s' (line %s, column %s)." % (
+        return "Rule %s didn't match at '%s' (line %s, column %s)." % (
             rule_name,
             self.text[self.pos:self.pos + 20],
             self.line(),
@@ -49,7 +49,7 @@ class IncompleteParseError(ParseError):
     entire text."""
 
     def __str__(self):
-        return u"Rule '%s' matched in its entirety, but it didn't consume all the text. The non-matching portion of the text begins with '%s' (line %s, column %s)." % (
+        return "Rule '%s' matched in its entirety, but it didn't consume all the text. The non-matching portion of the text begins with '%s' (line %s, column %s)." % (
             self.expr.name,
             self.text[self.pos:self.pos + 20],
             self.line(),
@@ -76,7 +76,7 @@ def __init__(self, exc, exc_class, node):
         """
         self.original_class = exc_class
-        super(VisitationError, self).__init__(
+        super().__init__(
             '%s: %s\n\n'
             'Parse tree:\n'
             '%s' %
@@ -105,4 +105,4 @@ def __init__(self, label):
         self.label = label
 
     def __str__(self):
-        return u'The label "%s" was never defined.' % self.label
+        return 'The label "%s" was never defined.' % self.label
20 changes: 10 additions & 10 deletions parsimonious/expressions.py
@@ -204,7 +204,7 @@ def match_core(self, text, pos, cache, error):
         return node
 
     def __str__(self):
-        return u'<%s %s>' % (
+        return '<%s %s>' % (
             self.__class__.__name__,
             self.as_rule())

@@ -218,7 +218,7 @@ def as_rule(self):
         if rhs.startswith('(') and rhs.endswith(')'):
             rhs = rhs[1:-1]
 
-        return (u'%s = %s' % (self.name, rhs)) if self.name else rhs
+        return ('%s = %s' % (self.name, rhs)) if self.name else rhs
 
     def _unicode_members(self):
         """Return an iterable of my unicode-represented children, stopping
@@ -244,7 +244,7 @@ class Literal(Expression):
     __slots__ = ['literal']
 
     def __init__(self, literal, name=''):
-        super(Literal, self).__init__(name)
+        super().__init__(name)
         self.literal = literal
         self.identity_tuple = (name, literal)

@@ -278,7 +278,7 @@ class Regex(Expression):
 
     def __init__(self, pattern, name='', ignore_case=False, locale=False,
                  multiline=False, dot_all=False, unicode=False, verbose=False, ascii=False):
-        super(Regex, self).__init__(name)
+        super().__init__(name)
         self.re = re.compile(pattern, (ignore_case and re.I) |
                                       (locale and re.L) |
                                       (multiline and re.M) |
@@ -314,7 +314,7 @@ class Compound(Expression):
 
     def __init__(self, *members, **kwargs):
         """``members`` is a sequence of expressions."""
-        super(Compound, self).__init__(kwargs.get('name', ''))
+        super().__init__(kwargs.get('name', ''))
         self.members = members
 
     def resolve_refs(self, rule_map):
@@ -356,7 +356,7 @@ def _uncached_match(self, text, pos, cache, error):
         return Node(self, text, pos, new_pos, children)
 
     def _as_rhs(self):
-        return u'({0})'.format(u' '.join(self._unicode_members()))
+        return '({0})'.format(' '.join(self._unicode_members()))
 
 
 class OneOf(Compound):
@@ -374,7 +374,7 @@ def _uncached_match(self, text, pos, cache, error):
         return Node(self, text, pos, node.end, children=[node])
 
     def _as_rhs(self):
-        return u'({0})'.format(u' / '.join(self._unicode_members()))
+        return '({0})'.format(' / '.join(self._unicode_members()))
 
 
 class Lookahead(Compound):
@@ -384,7 +384,7 @@ class Lookahead(Compound):
     __slots__ = ['negativity']
 
     def __init__(self, member, *, negative=False, **kwargs):
-        super(Lookahead, self).__init__(member, **kwargs)
+        super().__init__(member, **kwargs)
         self.negativity = bool(negative)
 
     def _uncached_match(self, text, pos, cache, error):
@@ -393,7 +393,7 @@ def _uncached_match(self, text, pos, cache, error):
         return Node(self, text, pos, pos)
 
     def _as_rhs(self):
-        return u'%s%s' % ('!' if self.negativity else '&', self._unicode_members()[0])
+        return '%s%s' % ('!' if self.negativity else '&', self._unicode_members()[0])
 
 def Not(term):
     return Lookahead(term, negative=True)
@@ -406,7 +406,7 @@ class Quantifier(Compound):
     __slots__ = ['min', 'max']
 
     def __init__(self, member, *, min=0, max=float('inf'), name='', **kwargs):
-        super(Quantifier, self).__init__(member, name=name, **kwargs)
+        super().__init__(member, name=name, **kwargs)
         self.min = min
         self.max = max

6 changes: 3 additions & 3 deletions parsimonious/grammar.py
@@ -64,7 +64,7 @@ def __init__(self, rules='', **more_rules):
             for k, v in more_rules.items()}
 
         exprs, first = self._expressions_from_rules(rules, decorated_custom_rules)
-        super(Grammar, self).__init__(exprs.items())
+        super().__init__(exprs.items())
         self.default_rule = first  # may be None
 
     def default(self, rule_name):
@@ -260,7 +260,7 @@ class LazyReference(str):
     """A lazy reference to a rule, which we resolve after grokking all the
     rules"""
 
-    name = u''
+    name = ''
 
     def resolve_refs(self, rule_map):
         """
@@ -292,7 +292,7 @@ def resolve_refs(self, rule_map):
 
     # Just for debugging:
     def _as_rhs(self):
-        return u'<LazyReference to %s>' % self
+        return '<LazyReference to %s>' % self
 
 
 class RuleVisitor(NodeVisitor):
16 changes: 8 additions & 8 deletions parsimonious/tests/test_expressions.py
@@ -219,7 +219,7 @@ def test_parse_with_leftovers(self):
             grammar.parse('chitty bangbang')
         except IncompleteParseError as error:
             self.assertEqual(str(
-                error), u"Rule 'sequence' matched in its entirety, but it didn't consume all the text. The non-matching portion of the text begins with 'bang' (line 1, column 12).")
+                error), "Rule 'sequence' matched in its entirety, but it didn't consume all the text. The non-matching portion of the text begins with 'bang' (line 1, column 12).")
 
     def test_favoring_named_rules(self):
         """Named rules should be used in error messages in favor of anonymous
@@ -229,7 +229,7 @@ def test_favoring_named_rules(self):
         try:
             grammar.parse('burp')
         except ParseError as error:
-            self.assertEqual(str(error), u"Rule 'starts_with_a' didn't match at 'burp' (line 1, column 1).")
+            self.assertEqual(str(error), "Rule 'starts_with_a' didn't match at 'burp' (line 1, column 1).")
 
     def test_line_and_column(self):
         """Make sure we got the line and column computation right."""
@@ -252,7 +252,7 @@ class RepresentationTests(TestCase):
     def test_unicode_crash(self):
         """Make sure matched unicode strings don't crash ``__str__``."""
         grammar = Grammar(r'string = ~r"\S+"u')
-        str(grammar.parse(u'中文'))
+        str(grammar.parse('中文'))
 
     def test_unicode(self):
         """Smoke-test the conversion of expressions to bits of rules.
@@ -270,7 +270,7 @@ def test_unicode_keep_parens(self):
         """
         # ZeroOrMore
         self.assertEqual(str(Grammar('foo = "bar" ("baz" "eggs")* "spam"')),
-                         u"foo = 'bar' ('baz' 'eggs')* 'spam'")
+                         "foo = 'bar' ('baz' 'eggs')* 'spam'")
 
         # Quantifiers
         self.assertEqual(str(Grammar('foo = "bar" ("baz" "eggs"){2,4} "spam"')),
@@ -288,15 +288,15 @@ def test_unicode_keep_parens(self):
 
         # OneOf
         self.assertEqual(str(Grammar('foo = "bar" ("baz" / "eggs") "spam"')),
-                         u"foo = 'bar' ('baz' / 'eggs') 'spam'")
+                         "foo = 'bar' ('baz' / 'eggs') 'spam'")
 
         # Lookahead
         self.assertEqual(str(Grammar('foo = "bar" &("baz" "eggs") "spam"')),
-                         u"foo = 'bar' &('baz' 'eggs') 'spam'")
+                         "foo = 'bar' &('baz' 'eggs') 'spam'")
 
         # Multiple sequences
         self.assertEqual(str(Grammar('foo = ("bar" "baz") / ("baff" "bam")')),
-                         u"foo = ('bar' 'baz') / ('baff' 'bam')")
+                         "foo = ('bar' 'baz') / ('baff' 'bam')")
 
     def test_unicode_surrounding_parens(self):
         """
@@ -305,7 +305,7 @@ def test_unicode_surrounding_parens(self):
         """
         self.assertEqual(str(Grammar('foo = ("foo" ("bar" "baz"))')),
-                         u"foo = 'foo' ('bar' 'baz')")
+                         "foo = 'foo' ('bar' 'baz')")
 
 
 class SlotsTests(TestCase):
4 changes: 2 additions & 2 deletions parsimonious/tests/test_grammar.py
@@ -568,9 +568,9 @@ def test_parse_failure(self):
         assert "Rule 'foo' didn't match at" in str(e.value)
 
     def test_token_repr(self):
-        t = Token(u'💣')
+        t = Token('💣')
         self.assertTrue(isinstance(t.__repr__(), str))
-        self.assertEqual(u'<Token "💣">', t.__repr__())
+        self.assertEqual('<Token "💣">', t.__repr__())
 
     def test_token_star_plus_expressions(self):
         a = Token("a")
8 changes: 4 additions & 4 deletions parsimonious/tests/test_nodes.py
@@ -65,16 +65,16 @@ def test_str(self):
 
     def test_repr(self):
         """Test repr of ``Node``."""
-        s = u'hai ö'
-        boogie = u'böogie'
+        s = 'hai ö'
+        boogie = 'böogie'
         n = Node(Literal(boogie), s, 0, 3, children=[
-            Node(Literal(' '), s, 3, 4), Node(Literal(u'ö'), s, 4, 5)])
+            Node(Literal(' '), s, 3, 4), Node(Literal('ö'), s, 4, 5)])
         self.assertEqual(repr(n),
             str("""s = {hai_o}\nNode({boogie}, s, 0, 3, children=[Node({space}, s, 3, 4), Node({o}, s, 4, 5)])""").format(
                 hai_o=repr(s),
                 boogie=repr(Literal(boogie)),
                 space=repr(Literal(" ")),
-                o=repr(Literal(u"ö")),
+                o=repr(Literal("ö")),
             )
         )

2 changes: 1 addition & 1 deletion parsimonious/utils.py
@@ -36,7 +36,7 @@ def __init__(self, type):
         self.type = type
 
     def __str__(self):
-        return u'<Token "%s">' % (self.type,)
+        return '<Token "%s">' % (self.type,)
 
     def __eq__(self, other):
         return self.type == other.type
