Commit
black line-length set to 79 and coverage increased
deepakdinesh1123 committed Mar 21, 2022
1 parent 35c8d94 commit 302b80b
Showing 12 changed files with 328 additions and 109 deletions.
12 changes: 10 additions & 2 deletions docs/conf.py
@@ -83,15 +83,23 @@
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
("index", "parsel.tex", "Parsel Documentation", "Scrapy Project", "manual"),
(
"index",
"parsel.tex",
"Parsel Documentation",
"Scrapy Project",
"manual",
),
]


# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "parsel", "Parsel Documentation", ["Scrapy Project"], 1)]
man_pages = [
("index", "parsel", "Parsel Documentation", ["Scrapy Project"], 1)
]


# -- Options for Texinfo output ----------------------------------------
4 changes: 3 additions & 1 deletion docs/conftest.py
@@ -6,7 +6,9 @@
try:
from sybil.parsers.codeblock import PythonCodeBlockParser
except ImportError:
from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser
from sybil.parsers.codeblock import (
CodeBlockParser as PythonCodeBlockParser,
)
from sybil.parsers.doctest import DocTestParser
from sybil.parsers.skip import skip

8 changes: 6 additions & 2 deletions parsel/csstranslator.py
@@ -14,7 +14,9 @@ class XPathExpr(OriginalXPathExpr):

@classmethod
def from_xpath(cls, xpath, textnode=False, attribute=None):
x = cls(path=xpath.path, element=xpath.element, condition=xpath.condition)
x = cls(
path=xpath.path, element=xpath.element, condition=xpath.condition
)
x.textnode = textnode
x.attribute = attribute
return x
@@ -81,7 +83,9 @@ def xpath_attr_functional_pseudo_element(self, xpath, function):
raise ExpressionError(
f"Expected a single string or ident for ::attr(), got {function.arguments!r}"
)
return XPathExpr.from_xpath(xpath, attribute=function.arguments[0].value)
return XPathExpr.from_xpath(
xpath, attribute=function.arguments[0].value
)

def xpath_text_simple_pseudo_element(self, xpath):
"""Support selecting text nodes using ::text pseudo-element"""
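The ::attr() and ::text pseudo-elements handled by the translator above are part of parsel's public CSS API. A brief usage sketch; the markup and URL are made up for illustration:

from parsel import Selector

sel = Selector(text='<a href="https://example.com">Example site</a>')

# ::attr(href) selects the attribute value, ::text the text node.
sel.css("a::attr(href)").get()  # 'https://example.com'
sel.css("a::text").get()        # 'Example site'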
48 changes: 28 additions & 20 deletions parsel/selector.py
@@ -99,7 +99,9 @@ def xpath(
selector.xpath('//a[href=$url]', url="http://www.example.com")
"""
return self.__class__(
flatten([x.xpath(xpath, namespaces=namespaces, **kwargs) for x in self])
flatten(
[x.xpath(xpath, namespaces=namespaces, **kwargs) for x in self]
)
)

def css(self, query: str) -> "SelectorList[_SelectorType]":
@@ -123,7 +125,9 @@ def re(
Passing ``replace_entities`` as ``False`` switches off these
replacements.
"""
return flatten([x.re(regex, replace_entities=replace_entities) for x in self])
return flatten(
[x.re(regex, replace_entities=replace_entities) for x in self]
)

def jsonpath(self, query: str) -> "SelectorList[_SelectorType]":
"""
@@ -290,7 +294,9 @@ def __init__(

if text is not None:
if type in ("html", "xml", None):
self._load_lxml_root(text, type=type or "html", base_url=base_url)
self._load_lxml_root(
text, type=type or "html", base_url=base_url
)
elif type == "json":
self.root = _load_json_or_none(text)
self.type = type
@@ -362,7 +368,10 @@ def xpath(
nsp.update(namespaces)
try:
result = xpathev(
query, namespaces=nsp, smart_strings=self._lxml_smart_strings, **kwargs
query,
namespaces=nsp,
smart_strings=self._lxml_smart_strings,
**kwargs,
)
except etree.XPathError as exc:
raise ValueError(f"XPath error: {exc} in {query}")
@@ -392,7 +401,9 @@ def css(self: _SelectorType, query: str) -> SelectorList[_SelectorType]:
if self.type == "text":
self._load_lxml_root(self.root, type="html")
elif self.type not in ("html", "xml"):
raise ValueError(f"Cannot use css on a Selector of type {repr(self.type)}")
raise ValueError(
f"Cannot use css on a Selector of type {repr(self.type)}"
)
return self.xpath(self._css2xpath(query))

def jsonpath(
@@ -408,22 +419,14 @@ def jsonpath(
data = self.root
elif isinstance(self.root, str):
data = _load_json_or_none(self.root)
else:
data = _load_json_or_none(self._text)

jsonpath_expr = jsonpathParser(query)
result = [json.dumps(match.value) for match in jsonpath_expr.find(data)]

if result is None:
result = []
elif not isinstance(result, list):
result = [result]
result = [
json.dumps(match.value) for match in jsonpath_expr.find(data)
]

def make_selector(x):
if isinstance(x, str):
return self.__class__(text=x, _expr=query, type=type or "text")
else:
return self.__class__(root=x, _expr=query, type=type)
return self.__class__(text=x, _expr=query, type=type or "text")

result = [make_selector(x) for x in result]
return self.selectorlist_cls(result)
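
The jsonpath() method simplified above wraps jsonpath_ng (added to install_requires below) and returns each match as a JSON-encoded string inside a new Selector. A minimal sketch based on the API as it appears in this diff; the data and query are illustrative:

from parsel import Selector

sel = Selector(text='{"prices": [10, 20, 30]}', type="json")

# Each match is serialized with json.dumps before being wrapped, so this
# is expected to yield ['10', '20', '30'].
sel.jsonpath("$.prices[*]").getall()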
@@ -446,7 +449,9 @@ def re(
Passing ``replace_entities`` as ``False`` switches off these
replacements.
"""
return extract_regex(regex, self.get(), replace_entities=replace_entities)
return extract_regex(
regex, self.get(), replace_entities=replace_entities
)

@typing.overload
def re_first(
@@ -483,7 +488,8 @@ def re_first(
replacements.
"""
return next(
iflatten(self.re(regex, replace_entities=replace_entities)), default
iflatten(self.re(regex, replace_entities=replace_entities)),
default,
)

def get(self) -> str:
@@ -582,6 +588,8 @@ def __bool__(self) -> bool:
def __str__(self) -> str:
data = repr(shorten(self.get(), width=40))
expr_field = "jsonpath" if self.type == "json" else "xpath"
return f"<{type(self).__name__} {expr_field}={self._expr!r} data={data}>"
return (
f"<{type(self).__name__} {expr_field}={self._expr!r} data={data}>"
)

__repr__ = __str__
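
The re()/re_first() docstrings above describe the replace_entities flag; a short usage sketch of that behaviour with standard parsel calls, using illustrative markup:

from parsel import Selector

sel = Selector(text="<p>3 &gt; 2</p>")

# With the default replace_entities=True, entity references in the
# extracted text are replaced by their characters, e.g. '3 > 2'.
sel.css("p").re_first(r"3 .* 2")

# Passing replace_entities=False keeps them as-is, e.g. '3 &gt; 2'.
sel.css("p").re_first(r"3 .* 2", replace_entities=False)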
8 changes: 6 additions & 2 deletions parsel/xpathfuncs.py
@@ -40,10 +40,14 @@ def has_class(context, *classes):
"""
if not context.eval_context.get("args_checked"):
if not classes:
raise ValueError("XPath error: has-class must have at least 1 argument")
raise ValueError(
"XPath error: has-class must have at least 1 argument"
)
for c in classes:
if not isinstance(c, str):
raise ValueError("XPath error: has-class arguments must be strings")
raise ValueError(
"XPath error: has-class arguments must be strings"
)
context.eval_context["args_checked"] = True

node_cls = context.context_node.get("class")
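The argument checks reformatted above belong to parsel's built-in has-class() XPath function. A quick usage sketch; the HTML is illustrative:

from parsel import Selector

sel = Selector(text='<p class="lead intro">Hi</p><p class="body">Bye</p>')

# has-class() is true only if the element carries every listed class.
sel.xpath('//p[has-class("lead")]/text()').getall()           # ['Hi']
sel.xpath('//p[has-class("lead", "intro")]/text()').getall()  # ['Hi']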
1 change: 1 addition & 0 deletions pylintrc
@@ -24,6 +24,7 @@ disable=bad-continuation,
too-many-arguments,
too-many-lines,
too-many-public-methods,
too-many-branches,
unidiomatic-typecheck,
unused-argument,
use-a-generator,
7 changes: 6 additions & 1 deletion setup.py
@@ -24,7 +24,12 @@
"parsel": "parsel",
},
include_package_data=True,
install_requires=["cssselect>=0.9", "lxml", "w3lib>=1.19.0", "jsonpath_ng>=1.5.3"],
install_requires=[
"cssselect>=0.9",
"lxml",
"w3lib>=1.19.0",
"jsonpath_ng>=1.5.3",
],
python_requires=">=3.6",
license="BSD",
zip_safe=False,
(diffs for the remaining 5 of the 12 changed files are not shown here)
