Remove py2 compat (#2645)
* strip out PY2 compat code from pycompat.py

* isort

* remove 2 unused imports

* remove extra import

* no more future

* no unicode literals

* no more ReprMixin

* cleanup merge

* remove deprecated imports from collections

* 2 more cleanups from shoyer
Joe Hamman authored and shoyer committed Jan 25, 2019
1 parent 79fa060 commit aabda43
Showing 90 changed files with 338 additions and 794 deletions.
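The substitutions in the files below follow a handful of recurring patterns. A minimal before/after sketch of those idioms (illustrative only; the `attrs` dict and its keys are invented for the example):

```python
from collections import OrderedDict       # concrete containers stay in `collections`
from collections.abc import Mapping       # ABCs now come from `collections.abc`
from pathlib import Path                  # replaces the `path_type` shim in pycompat

attrs = OrderedDict([('units', 'm'), ('long_name', 'height')])

# basestring -> str: Python 3 has a single text type
assert isinstance('height', str)

# iteritems(d) -> d.items(): dict views replace the Python 2 iterator helpers
for key, value in attrs.items():
    print(key, value)

# path_type -> pathlib.Path, and Mapping checks keep working through the ABC
assert isinstance(Path('data.nc'), Path)
assert isinstance(attrs, Mapping)

# move_to_end(d, k) helper -> the built-in OrderedDict.move_to_end(k)
attrs.move_to_end('units')
assert list(attrs) == ['long_name', 'units']
```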
2 changes: 0 additions & 2 deletions setup.py
@@ -15,8 +15,6 @@
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
3 changes: 0 additions & 3 deletions xarray/__init__.py
@@ -1,7 +1,4 @@
# flake8: noqa
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from ._version import get_versions
__version__ = get_versions()['version']
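Dropping these `__future__` imports is a no-op on Python 3, where the behaviour they switched on is always in effect; a quick sketch:

```python
# What the removed __future__ imports used to guarantee on Python 2 is simply
# the default in Python 3:
print(7 / 2)    # 3.5 -- true division (the old `division` import)
print(7 // 2)   # 3   -- floor division is spelled explicitly
print('print', 'is', 'a', 'function')  # the old `print_function` import
# `absolute_import`: a bare `import io` resolves to the stdlib module, never a sibling file.
import io
assert hasattr(io, 'BytesIO')
```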
32 changes: 15 additions & 17 deletions xarray/backends/api.py
@@ -1,18 +1,16 @@
from __future__ import absolute_import, division, print_function

import os.path
import warnings
from glob import glob
from io import BytesIO
from numbers import Number
from pathlib import Path

import numpy as np

from .. import Dataset, backends, conventions
from ..core import indexing
from ..core.combine import (
_CONCAT_DIM_DEFAULT, _auto_combine, _infer_concat_order_from_positions)
from ..core.pycompat import basestring, path_type
from ..core.utils import close_on_error, is_grib_path, is_remote_uri
from .common import ArrayWriter
from .locks import _get_scheduler
@@ -99,7 +97,7 @@ def _normalize_path(path):
def _validate_dataset_names(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_name(name):
if isinstance(name, basestring):
if isinstance(name, str):
if not name:
raise ValueError('Invalid name for DataArray or Dataset key: '
'string must be length 1 or greater for '
@@ -117,7 +115,7 @@ def _validate_attrs(dataset):
a string, an ndarray or a list/tuple of numbers/strings.
"""
def check_attr(name, value):
if isinstance(name, basestring):
if isinstance(name, str):
if not name:
raise ValueError('Invalid name for attr: string must be '
'length 1 or greater for serialization to '
@@ -126,7 +124,7 @@ def check_attr(name, value):
raise TypeError("Invalid name for attr: {} must be a string for "
"serialization to netCDF files".format(name))

if not isinstance(value, (basestring, Number, np.ndarray, np.number,
if not isinstance(value, (str, Number, np.ndarray, np.number,
list, tuple)):
raise TypeError('Invalid value for attr: {} must be a number, '
'a string, an ndarray or a list/tuple of '
@@ -279,7 +277,7 @@ def maybe_decode_store(store, lock=False):
from dask.base import tokenize
# if passed an actual file path, augment the token with
# the file modification time
if (isinstance(filename_or_obj, basestring) and
if (isinstance(filename_or_obj, str) and
not is_remote_uri(filename_or_obj)):
mtime = os.path.getmtime(filename_or_obj)
else:
@@ -295,13 +293,13 @@ def maybe_decode_store(store, lock=False):

return ds2

if isinstance(filename_or_obj, path_type):
if isinstance(filename_or_obj, Path):
filename_or_obj = str(filename_or_obj)

if isinstance(filename_or_obj, backends.AbstractDataStore):
store = filename_or_obj
ds = maybe_decode_store(store)
elif isinstance(filename_or_obj, basestring):
elif isinstance(filename_or_obj, str):

if (isinstance(filename_or_obj, bytes) and
filename_or_obj.startswith(b'\x89HDF')):
@@ -310,7 +308,7 @@ def maybe_decode_store(store, lock=False):
filename_or_obj.startswith(b'CDF')):
# netCDF3 file images are handled by scipy
pass
elif isinstance(filename_or_obj, basestring):
elif isinstance(filename_or_obj, str):
filename_or_obj = _normalize_path(filename_or_obj)

if engine is None:
@@ -352,7 +350,7 @@ def maybe_decode_store(store, lock=False):

# Ensure source filename always stored in dataset object (GH issue #2550)
if 'source' not in ds.encoding:
if isinstance(filename_or_obj, basestring):
if isinstance(filename_or_obj, str):
ds.encoding['source'] = filename_or_obj

return ds
@@ -588,15 +586,15 @@ def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
""" # noqa
if isinstance(paths, basestring):
if isinstance(paths, str):
if is_remote_uri(paths):
raise ValueError(
'cannot do wild-card matching for paths that are remote URLs: '
'{!r}. Instead, supply paths as an explicit list of strings.'
.format(paths))
paths = sorted(glob(paths))
else:
paths = [str(p) if isinstance(p, path_type) else p for p in paths]
paths = [str(p) if isinstance(p, Path) else p for p in paths]

if not paths:
raise IOError('no files to open')
@@ -681,7 +679,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
The ``multifile`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, path_type):
if isinstance(path_or_file, Path):
path_or_file = str(path_or_file)

if encoding is None:
@@ -698,7 +696,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
raise NotImplementedError(
'to_netcdf() with compute=False is not yet implemented when '
'returning bytes')
elif isinstance(path_or_file, basestring):
elif isinstance(path_or_file, str):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
@@ -733,7 +731,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,

if unlimited_dims is None:
unlimited_dims = dataset.encoding.get('unlimited_dims', None)
if isinstance(unlimited_dims, basestring):
if isinstance(unlimited_dims, str):
unlimited_dims = [unlimited_dims]

writer = ArrayWriter()
@@ -896,7 +894,7 @@ def to_zarr(dataset, store=None, mode='w-', synchronizer=None, group=None,
See `Dataset.to_zarr` for full API docs.
"""
if isinstance(store, path_type):
if isinstance(store, Path):
store = str(store)
if encoding is None:
encoding = {}
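The hunks above repeatedly swap the `path_type` and `basestring` checks for plain `pathlib.Path` and `str`; a minimal sketch of that normalization pattern (the `normalize_target` helper is hypothetical, not a function in this module):

```python
from pathlib import Path


def normalize_target(path_or_file):
    """Coerce a pathlib.Path to str, mirroring the isinstance checks above."""
    if isinstance(path_or_file, Path):
        path_or_file = str(path_or_file)
    if not isinstance(path_or_file, str):
        raise TypeError('expected str or pathlib.Path, got %r' % type(path_or_file))
    return path_or_file


print(normalize_target(Path('data') / 'file.nc'))  # e.g. 'data/file.nc' on POSIX
print(normalize_target('file.nc'))                 # 'file.nc'
```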
2 changes: 0 additions & 2 deletions xarray/backends/cfgrib_.py
@@ -1,5 +1,3 @@
from __future__ import absolute_import, division, print_function

import numpy as np

from .. import Variable
15 changes: 7 additions & 8 deletions xarray/backends/common.py
@@ -1,16 +1,15 @@
from __future__ import absolute_import, division, print_function

import logging
import time
import traceback
import warnings
from collections import Mapping, OrderedDict
from collections import OrderedDict
from collections.abc import Mapping

import numpy as np

from ..conventions import cf_encoder
from ..core import indexing
from ..core.pycompat import dask_array_type, iteritems
from ..core.pycompat import dask_array_type
from ..core.utils import FrozenOrderedDict, NdimSizeLenMixin

# Create a logger object, but don't add any handlers. Leave that to user code.
@@ -109,9 +108,9 @@ class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in iteritems(variables)}
for k, v in variables.items()}
attributes = {'%s_suffix' % k: v
for k, v in iteritems(attributes)}
for k, v in attributes.items()}
return variables, attributes
This function will be called anytime variables or attributes
@@ -275,7 +274,7 @@ def set_attributes(self, attributes):
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
"""
for k, v in iteritems(attributes):
for k, v in attributes.items():
self.set_attribute(k, v)

def set_variables(self, variables, check_encoding_set, writer,
@@ -297,7 +296,7 @@ def set_variables(self, variables, check_encoding_set, writer,
dimensions.
"""

for vn, v in iteritems(variables):
for vn, v in variables.items():
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(
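Since Python 3.3 the abstract base classes live in `collections.abc`, while concrete containers such as `OrderedDict` remain in `collections`, which is exactly the split the imports above adopt. A small sketch (the `FrozenDict` class is hypothetical, loosely modelled on the read-only mappings used in this module):

```python
from collections import OrderedDict      # concrete container
from collections.abc import Mapping      # abstract base class


class FrozenDict(Mapping):
    """Minimal read-only mapping built on the Python 3 import split."""

    def __init__(self, *args, **kwargs):
        self._data = OrderedDict(*args, **kwargs)

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)


fd = FrozenDict([('x', 1), ('y', 2)])
assert isinstance(fd, Mapping)
assert dict(fd.items()) == {'x': 1, 'y': 2}   # .items() replaces iteritems()
```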
14 changes: 6 additions & 8 deletions xarray/backends/h5netcdf_.py
@@ -1,12 +1,10 @@
from __future__ import absolute_import, division, print_function

import functools
from collections import OrderedDict

import numpy as np

from .. import Variable
from ..core import indexing
from ..core.pycompat import OrderedDict, bytes_type, iteritems, unicode_type
from ..core.utils import FrozenOrderedDict, close_on_error
from .common import WritableCFDataStore
from .file_manager import CachingFileManager
@@ -32,7 +30,7 @@ def _getitem(self, key):


def maybe_decode_bytes(txt):
if isinstance(txt, bytes_type):
if isinstance(txt, bytes):
return txt.decode('utf-8')
else:
return txt
@@ -124,7 +122,7 @@ def open_store_variable(self, name, var):
encoding['original_shape'] = var.shape

vlen_dtype = h5py.check_dtype(vlen=var.dtype)
if vlen_dtype is unicode_type:
if vlen_dtype is str:
encoding['dtype'] = str
elif vlen_dtype is not None: # pragma: no cover
# xarray doesn't support writing arbitrary vlen dtypes yet.
@@ -136,7 +134,7 @@ def open_store_variable(self, name, var):

def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in iteritems(self.ds.variables))
for k, v in self.ds.variables.items())

def get_attrs(self):
return FrozenOrderedDict(_read_attributes(self.ds))
@@ -182,7 +180,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
'NC_CHAR type.' % name)

if dtype is str:
dtype = h5py.special_dtype(vlen=unicode_type)
dtype = h5py.special_dtype(vlen=str)

encoding = _extract_h5nc_encoding(variable,
raise_on_invalid=check_encoding)
@@ -221,7 +219,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
else:
nc4_var = self.ds[name]

for k, v in iteritems(attrs):
for k, v in attrs.items():
nc4_var.attrs[k] = v

target = H5NetCDFArrayWrapper(name, self)
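With only one bytes type and one text type left, the `bytes_type` and `unicode_type` aliases collapse to the builtins `bytes` and `str`. A runnable sketch of the decode helper shown above, assuming UTF-8 encoded attribute data:

```python
def maybe_decode_bytes(txt):
    # bytes_type is simply `bytes` on Python 3; text attributes stay as `str`.
    if isinstance(txt, bytes):
        return txt.decode('utf-8')
    return txt


assert maybe_decode_bytes(b'CF-1.7') == 'CF-1.7'
assert maybe_decode_bytes('already text') == 'already text'
```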
4 changes: 1 addition & 3 deletions xarray/backends/lru_cache.py
@@ -1,8 +1,6 @@
import collections
import threading

from ..core.pycompat import move_to_end


class LRUCache(collections.MutableMapping):
"""Thread-safe LRUCache based on an OrderedDict.
@@ -41,7 +39,7 @@ def __getitem__(self, key):
# record recent use of the key by moving it to the front of the list
with self._lock:
value = self._cache[key]
move_to_end(self._cache, key)
self._cache.move_to_end(key)
return value

def _enforce_size_limit(self, capacity):
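The `move_to_end` shim in pycompat existed because Python 2's `OrderedDict` had no such method; on Python 3 the cache can call `OrderedDict.move_to_end` directly, as the hunk above does. A standalone sketch of the recency/eviction idiom (not the `LRUCache` class itself):

```python
from collections import OrderedDict

cache = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
cache.move_to_end('a')            # mark 'a' as most recently used
assert list(cache) == ['b', 'c', 'a']
cache.popitem(last=False)         # evict the least recently used entry ('b')
assert list(cache) == ['c', 'a']
```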
4 changes: 1 addition & 3 deletions xarray/backends/memory.py
@@ -1,10 +1,8 @@
from __future__ import absolute_import, division, print_function

import copy
from collections import OrderedDict

import numpy as np

from ..core.pycompat import OrderedDict
from ..core.variable import Variable
from .common import AbstractWritableDataStore

16 changes: 6 additions & 10 deletions xarray/backends/netCDF4_.py
@@ -1,16 +1,15 @@
from __future__ import absolute_import, division, print_function

import functools
import operator
import warnings
from collections import OrderedDict
from contextlib import suppress
from distutils.version import LooseVersion

import numpy as np

from .. import Variable, coding
from ..coding.variables import pop_to
from ..core import indexing
from ..core.pycompat import PY3, OrderedDict, basestring, iteritems, suppress
from ..core.utils import FrozenOrderedDict, close_on_error, is_remote_uri
from .common import (
BackendArray, WritableCFDataStore, find_root, robust_getitem)
@@ -81,9 +80,6 @@ def _getitem(self, key):
msg = ('The indexing operation you are attempting to perform '
'is not valid on netCDF4.Variable object. Try loading '
'your data into memory first by calling .load().')
if not PY3:
import traceback
msg += '\n\nOriginal traceback:\n' + traceback.format_exc()
raise IndexError(msg)
return array

@@ -141,7 +137,7 @@ def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
return ds
else:
# make sure it's a string
if not isinstance(group, basestring):
if not isinstance(group, str):
raise ValueError('group must be a string or None')
# support path-like syntax
path = group.strip('/').split('/')
@@ -392,7 +388,7 @@ def open_store_variable(self, name, var):
def get_variables(self):
dsvars = FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in
iteritems(self.ds.variables))
self.ds.variables.items())
return dsvars

def get_attrs(self):
@@ -402,7 +398,7 @@ def get_attrs(self):

def get_dimensions(self):
dims = FrozenOrderedDict((k, len(v))
for k, v in iteritems(self.ds.dimensions))
for k, v in self.ds.dimensions.items())
return dims

def get_encoding(self):
@@ -467,7 +463,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
fill_value=fill_value)
_disable_auto_decode_variable(nc4_var)

for k, v in iteritems(attrs):
for k, v in attrs.items():
# set attributes one-by-one since netCDF4<1.0.10 can't handle
# OrderedDict as the input to setncatts
_set_nc_attribute(nc4_var, k, v)
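`suppress` now comes straight from `contextlib` instead of being re-exported through `pycompat`; a minimal usage sketch (the filename is made up):

```python
import os
from contextlib import suppress   # stdlib context manager, Python 3.4+

# Remove a scratch file if it exists; equivalent to try/except FileNotFoundError: pass
with suppress(FileNotFoundError):
    os.remove('scratch-file-that-may-not-exist.nc')
```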