Commit 9874d11

use same import pattern for six.urllib across the codebase
- also, re-organise six lib imports
manu-chroma committed Jun 27, 2017

Verified: created on GitHub.com and signed with GitHub's verified signature (the key has since expired).
1 parent d23c804 commit 9874d11
Showing 7 changed files with 41 additions and 43 deletions.
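
For context, the pattern this commit standardises on is the namespace-style import that six provides as a Python 2/3 shim, so call sites read like Python 3's stdlib urllib. A minimal sketch of before and after (the URL is illustrative):

    # After: one namespace import, used everywhere as urllib.parse.*
    from six.moves import urllib

    split = urllib.parse.urlsplit("http://example.com/schema#Record")
    doc_url, frg = urllib.parse.urldefrag("http://example.com/schema#Record")

    # Before: the parse submodule was imported directly,
    # hiding the urllib namespace at call sites:
    # from six.moves.urllib import parse
    # split = parse.urlsplit("http://example.com/schema#Record")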
13 changes: 7 additions & 6 deletions schema_salad/jsonld_context.py
@@ -2,10 +2,11 @@
 import collections
 import shutil
 import json
-import ruamel.yaml as yaml
+
 import six
-# import urlparse
-from six.moves.urllib import parse
+from six.moves import urllib
+
+import ruamel.yaml as yaml
 try:
     from ruamel.yaml import CSafeLoader as SafeLoader
 except ImportError:
@@ -37,7 +38,7 @@ def pred(datatype, # type: Dict[str, Union[Dict, str]]
          namespaces  # type: Dict[str, rdflib.namespace.Namespace]
          ):
     # type: (...) -> Union[Dict, Text]
-    split = parse.urlsplit(name)
+    split = urllib.parse.urlsplit(name)

     vee = None  # type: Optional[Union[str, Text]]

@@ -105,7 +106,7 @@ def process_type(t, # type: Dict[str, Any]
     classnode = URIRef(recordname)
     g.add((classnode, RDF.type, RDFS.Class))

-    split = parse.urlsplit(recordname)
+    split = urllib.parse.urlsplit(recordname)
     predicate = recordname
     if t.get("inVocab", True):
         if split.scheme:
@@ -221,7 +222,7 @@ def makerdf(workflow, # type: Union[str, Text]
         url = v
         if url == "@id":
             idfields.append(k)
-        doc_url, frg = parse.urldefrag(url)
+        doc_url, frg = urllib.parse.urldefrag(url)
         if "/" in frg:
             p = frg.split("/")[0]
             prefixes[p] = u"%s#%s/" % (doc_url, p)
7 changes: 3 additions & 4 deletions schema_salad/main.py
@@ -6,9 +6,8 @@
 import traceback
 import json
 import os
-# import urlparse
-from six.moves.urllib import parse

+from six.moves import urllib

 import pkg_resources  # part of setuptools
@@ -112,7 +111,7 @@ def main(argsl=None):  # type: (List[str]) -> int
         # Load schema document and resolve refs

         schema_uri = args.schema
-        if not parse.urlparse(schema_uri)[0]:
+        if not urllib.parse.urlparse(schema_uri)[0]:
             schema_uri = "file://" + os.path.abspath(schema_uri)
         schema_raw_doc = metaschema_loader.fetch(schema_uri)
@@ -209,7 +208,7 @@ def main(argsl=None):  # type: (List[str]) -> int
         # Load target document and resolve refs
         try:
             uri = args.document
-            if not parse.urlparse(uri)[0]:
+            if not urllib.parse.urlparse(uri)[0]:
                 doc = "file://" + os.path.abspath(uri)
             document, doc_metadata = document_loader.resolve_ref(uri)
         except (validate.ValidationException, RuntimeError) as e:
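
In both hunks above, urllib.parse.urlparse(...)[0] reads the scheme component of the argument; when it is empty, the value is treated as a local path and upgraded to a file:// URI. A standalone illustration of the idiom (the path is hypothetical):

    import os
    from six.moves import urllib

    uri = "docs/example.yml"  # hypothetical local path, no scheme
    if not urllib.parse.urlparse(uri)[0]:  # empty scheme -> bare path
        uri = "file://" + os.path.abspath(uri)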
15 changes: 7 additions & 8 deletions schema_salad/makedoc.py
@@ -7,15 +7,14 @@
 import copy
 import re
 import sys
-
 import logging

 from . import schema
-from schema_salad.utils import add_dictlist, aslist
+from .utils import add_dictlist, aslist

 import six
 from six.moves import range
-from six.moves.urllib import parse
+from six.moves import urllib
 from six import StringIO

 from typing import cast, Any, Dict, IO, List, Optional, Set, Text, Union
@@ -42,7 +41,7 @@ def has_types(items): # type: (Any) -> List[basestring]


 def linkto(item):  # type: (Text) -> Text
-    _, frg = parse.urldefrag(item)
+    _, frg = urllib.parse.urldefrag(item)
     return "[%s](#%s)" % (frg, to_id(frg))


@@ -210,8 +209,8 @@ def __init__(self, toc, j, renderlist, redirects, primitiveType):
                     if tp not in self.uses:
                         self.uses[tp] = []
                     if (t["name"], f["name"]) not in self.uses[tp]:
-                        _, frg1 = parse.urldefrag(t["name"])
-                        _, frg2 = parse.urldefrag(f["name"])
+                        _, frg1 = urllib.parse.urldefrag(t["name"])
+                        _, frg2 = urllib.parse.urldefrag(f["name"])
                         self.uses[tp].append((frg1, frg2))
                     if tp not in basicTypes and tp not in self.record_refs[t["name"]]:
                         self.record_refs[t["name"]].append(tp)
@@ -272,7 +271,7 @@ def typefmt(self,
         elif str(tp) in basicTypes:
             return """<a href="%s">%s</a>""" % (self.primitiveType, schema.avro_name(str(tp)))
         else:
-            _, frg = parse.urldefrag(tp)
+            _, frg = urllib.parse.urldefrag(tp)
             if frg is not '':
                 tp = frg
             return """<a href="#%s">%s</a>""" % (to_id(tp), tp)
@@ -331,7 +330,7 @@ def extendsfrom(item, ex):
                     lines.append(l)
                 f["doc"] = "\n".join(lines)

-            _, frg = parse.urldefrag(f["name"])
+            _, frg = urllib.parse.urldefrag(f["name"])
             num = self.toc.add_entry(depth, frg)
             doc = "%s %s %s\n" % (("#" * depth), num, frg)
         else:
23 changes: 11 additions & 12 deletions schema_salad/ref_resolver.py
@@ -8,7 +8,6 @@

 import six
 from six.moves import range
-from six.moves.urllib import parse
 from six.moves import urllib
 from six import StringIO

@@ -57,7 +56,7 @@ def file_uri(path, split_frag=False):  # type: (str, bool) -> str
     return "file://%s%s" % (urlpath, frag)

 def uri_file_path(url):  # type: (str) -> str
-    split = parse.urlsplit(url)
+    split = urllib.parse.urlsplit(url)
     if split.scheme == "file":
         return urllib.request.url2pathname(
             str(split.path)) + ("#" + urllib.parse.unquote(str(split.fragment))
@@ -129,7 +128,7 @@ def fetch_text(self, url):
         if url in self.cache:
             return self.cache[url]

-        split = parse.urlsplit(url)
+        split = urllib.parse.urlsplit(url)
         scheme, path = split.scheme, split.path

         if scheme in [u'http', u'https'] and self.session is not None:
@@ -159,7 +158,7 @@ def check_exists(self, url):  # type: (Text) -> bool
         if url in self.cache:
             return True

-        split = parse.urlsplit(url)
+        split = urllib.parse.urlsplit(url)
         scheme, path = split.scheme, split.path

         if scheme in [u'http', u'https'] and self.session is not None:
@@ -175,7 +174,7 @@ def check_exists(self, url):  # type: (Text) -> bool
         raise ValueError('Unsupported scheme in url: %s' % url)

     def urljoin(self, base_url, url):  # type: (Text, Text) -> Text
-        return parse.urljoin(base_url, url)
+        return urllib.parse.urljoin(base_url, url)

 class Loader(object):
     def __init__(self,
@@ -190,7 +189,7 @@ def __init__(self,
                  ):
         # type: (...) -> None

-        normalize = lambda url: parse.urlsplit(url).geturl()
+        normalize = lambda url: urllib.parse.urlsplit(url).geturl()
         if idx is not None:
             self.idx = idx
         else:
@@ -276,20 +275,20 @@ def expand_url(self,
         if prefix in self.vocab:
             url = self.vocab[prefix] + url[len(prefix) + 1:]

-        split = parse.urlsplit(url)
+        split = urllib.parse.urlsplit(url)

         if (bool(split.scheme) or url.startswith(u"$(")
                 or url.startswith(u"${")):
             pass
         elif scoped_id and not bool(split.fragment):
-            splitbase = parse.urlsplit(base_url)
+            splitbase = urllib.parse.urlsplit(base_url)
             frg = u""
             if bool(splitbase.fragment):
                 frg = splitbase.fragment + u"/" + split.path
             else:
                 frg = split.path
             pt = splitbase.path if splitbase.path != '' else "/"
-            url = parse.urlunsplit(
+            url = urllib.parse.urlunsplit(
                 (splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
         elif scoped_ref is not None and not split.fragment:
             pass
@@ -496,7 +495,7 @@ def resolve_ref(self,
             doc_url = url
         else:
             # Load structured document
-            doc_url, frg = parse.urldefrag(url)
+            doc_url, frg = urllib.parse.urldefrag(url)
             if doc_url in self.idx and (not mixin):
                 # If the base document is in the index, it was already loaded,
                 # so if we didn't find the reference earlier then it must not
@@ -872,7 +871,7 @@ def fetch(self, url, inject_ids=True):  # type: (Text, bool) -> Any

     def validate_scoped(self, field, link, docid):
         # type: (Text, Text, Text) -> Text
-        split = parse.urlsplit(docid)
+        split = urllib.parse.urlsplit(docid)
         sp = split.fragment.split(u"/")
         n = self.scoped_ref_fields[field]
         while n > 0 and len(sp) > 0:
@@ -881,7 +880,7 @@ def validate_scoped(self, field, link, docid):
         tried = []
         while True:
             sp.append(link)
-            url = parse.urlunsplit((
+            url = urllib.parse.urlunsplit((
                 split.scheme, split.netloc, split.path, split.query,
                 u"/".join(sp)))
             tried.append(url)
6 changes: 3 additions & 3 deletions schema_salad/schema.py
@@ -9,10 +9,10 @@
 import avro.schema
 from . import validate
 import json
-import os

-from six.moves.urllib import parse
 import six
+import os
+from six.moves import urllib

 if six.PY3:
     AvroSchemaFromJSONData = avro.schema.SchemaFromJSONData
@@ -383,7 +383,7 @@ def replace_type(items, spec, loader, found):


 def avro_name(url):  # type: (AnyStr) -> AnyStr
-    doc_url, frg = parse.urldefrag(url)
+    doc_url, frg = urllib.parse.urldefrag(url)
     if frg != '':
         if '/' in frg:
             return frg[frg.rindex('/') + 1:]
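
The avro_name helper above strips a URL down to a bare Avro name: urldefrag splits off the fragment, and everything up to the last "/" in the fragment is discarded. A small sketch of the same steps:

    from six.moves import urllib

    doc_url, frg = urllib.parse.urldefrag("http://example.com/schema#Parent/Child")
    # frg == "Parent/Child"; keep only the segment after the last "/"
    name = frg[frg.rindex('/') + 1:] if '/' in frg else frg  # "Child"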
10 changes: 5 additions & 5 deletions schema_salad/tests/test_fetch.py
@@ -9,8 +9,8 @@
 import ruamel.yaml as yaml
 import json
 import os
-# import urlparse
-from six.moves.urllib import parse
+
+from six.moves import urllib

 class TestFetcher(unittest.TestCase):
     def test_fetcher(self):
@@ -33,14 +33,14 @@ def check_exists(self, url):  # type: (unicode) -> bool
                 return False

             def urljoin(self, base, url):
-                urlsp = parse.urlsplit(url)
+                urlsp = urllib.parse.urlsplit(url)
                 if urlsp.scheme:
                     return url
-                basesp = parse.urlsplit(base)
+                basesp = urllib.parse.urlsplit(base)

                 if basesp.scheme == "keep":
                     return base + "/" + url
-                return parse.urljoin(base, url)
+                return urllib.parse.urljoin(base, url)

         loader = schema_salad.ref_resolver.Loader({}, fetcher_constructor=TestFetcher)
         self.assertEqual({"hello": "foo"}, loader.resolve_ref("foo.txt")[0])
10 changes: 5 additions & 5 deletions schema_salad/validate.py
@@ -3,16 +3,16 @@
 import avro.schema
 from avro.schema import Schema
 import sys
-# import urlparse
-from six.moves.urllib import parse
 import re
 import logging

-from typing import Any, List, Set, Union, Text
-from .sourceline import SourceLine, lineno_re, bullets, indent
 import six
+from six.moves import urllib
 from six.moves import range

+from typing import Any, List, Set, Union, Text
+from .sourceline import SourceLine, lineno_re, bullets, indent
+
 _logger = logging.getLogger("salad")

 class ValidationException(Exception):
@@ -301,7 +301,7 @@ def validate_ex(expected_schema,  # type: Schema
             if (d not in identifiers and strict) and (
                     d not in foreign_properties and strict_foreign_properties) and not raise_ex:
                 return False
-            split = parse.urlsplit(d)
+            split = urllib.parse.urlsplit(d)
             if split.scheme:
                 err = sl.makeError(u"unrecognized extension field `%s`%s."
                                    " Did you include "
