
Commit aabda43

Joe Hamman authored and shoyer committed
Remove py2 compat (#2645)
* strip out PY2 compat code from pycompat.py
* isort
* remove 2 unused imports
* remove extra import
* no more future
* no unicode literals
* no more ReprMixin
* cleanup merge
* remove deprecated imports from collections
* 2 more cleanups from shoyer
1 parent 79fa060 commit aabda43
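
The deleted shims in xarray/core/pycompat.py looked roughly like the sketch below (a hedged reconstruction, not the verbatim module). On Python 3 every alias collapses to a built-in, which is why most of this diff is mechanical renaming.

```python
# Hedged reconstruction of the kind of shims pycompat.py carried;
# not the verbatim module.
import sys

PY3 = sys.version_info[0] >= 3

if PY3:  # the only branch that survives on a py3-only codebase
    basestring = str    # isinstance(x, basestring) -> isinstance(x, str)
    unicode_type = str
    bytes_type = bytes

    def iteritems(d):
        # dict.items() is already a cheap view on py3
        return iter(d.items())
# (the deleted else-branch bound the same names to the Python 2
#  built-ins: basestring, unicode, str, and dict.iteritems)
```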


90 files changed: +338 −794 lines

setup.py

Lines changed: 0 additions & 2 deletions
@@ -15,8 +15,6 @@
     'Operating System :: OS Independent',
     'Intended Audience :: Science/Research',
     'Programming Language :: Python',
-    'Programming Language :: Python :: 2',
-    'Programming Language :: Python :: 2.7',
     'Programming Language :: Python :: 3',
     'Programming Language :: Python :: 3.5',
     'Programming Language :: Python :: 3.6',
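
Dropping the Python 2 trove classifiers advertises the new support floor. A commonly paired change, not part of this diff, is enforcing that floor in setup() itself so pip refuses to install under Python 2 (assuming a setuptools/pip new enough to honor python_requires; the >=3.5 value is inferred from the remaining classifiers):

```python
# Hypothetical companion change, not in this commit.
from setuptools import setup

setup(
    name='example-package',   # placeholder, not xarray's real metadata
    version='0.1',
    python_requires='>=3.5',  # assumed floor, matching the classifiers
)
```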

xarray/__init__.py

Lines changed: 0 additions & 3 deletions
@@ -1,7 +1,4 @@
 # flake8: noqa
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
 
 from ._version import get_versions
 __version__ = get_versions()['version']
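
These __future__ imports only change behavior on Python 2; on Python 3 the features they enable are the defaults, so the imports were dead weight. A quick illustration:

```python
# On Python 3, the removed __future__ flags are already the defaults.
print(7 / 2)   # 3.5 -- true division (no need for `division`)
print(7 // 2)  # 3   -- floor division is spelled explicitly
print('hi')    # print is a built-in function (no need for `print_function`)
```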

xarray/backends/api.py

Lines changed: 15 additions & 17 deletions
@@ -1,18 +1,16 @@
-from __future__ import absolute_import, division, print_function
-
 import os.path
 import warnings
 from glob import glob
 from io import BytesIO
 from numbers import Number
+from pathlib import Path
 
 import numpy as np
 
 from .. import Dataset, backends, conventions
 from ..core import indexing
 from ..core.combine import (
     _CONCAT_DIM_DEFAULT, _auto_combine, _infer_concat_order_from_positions)
-from ..core.pycompat import basestring, path_type
 from ..core.utils import close_on_error, is_grib_path, is_remote_uri
 from .common import ArrayWriter
 from .locks import _get_scheduler
@@ -99,7 +97,7 @@ def _normalize_path(path):
 def _validate_dataset_names(dataset):
     """DataArray.name and Dataset keys must be a string or None"""
     def check_name(name):
-        if isinstance(name, basestring):
+        if isinstance(name, str):
             if not name:
                 raise ValueError('Invalid name for DataArray or Dataset key: '
                                  'string must be length 1 or greater for '
@@ -117,7 +115,7 @@ def _validate_attrs(dataset):
     a string, an ndarray or a list/tuple of numbers/strings.
     """
     def check_attr(name, value):
-        if isinstance(name, basestring):
+        if isinstance(name, str):
             if not name:
                 raise ValueError('Invalid name for attr: string must be '
                                  'length 1 or greater for serialization to '
@@ -126,7 +124,7 @@ def check_attr(name, value):
             raise TypeError("Invalid name for attr: {} must be a string for "
                             "serialization to netCDF files".format(name))
 
-        if not isinstance(value, (basestring, Number, np.ndarray, np.number,
+        if not isinstance(value, (str, Number, np.ndarray, np.number,
                                   list, tuple)):
             raise TypeError('Invalid value for attr: {} must be a number, '
                             'a string, an ndarray or a list/tuple of '
@@ -279,7 +277,7 @@ def maybe_decode_store(store, lock=False):
         from dask.base import tokenize
         # if passed an actual file path, augment the token with
         # the file modification time
-        if (isinstance(filename_or_obj, basestring) and
+        if (isinstance(filename_or_obj, str) and
                 not is_remote_uri(filename_or_obj)):
             mtime = os.path.getmtime(filename_or_obj)
         else:
@@ -295,13 +293,13 @@ def maybe_decode_store(store, lock=False):
 
         return ds2
 
-    if isinstance(filename_or_obj, path_type):
+    if isinstance(filename_or_obj, Path):
         filename_or_obj = str(filename_or_obj)
 
     if isinstance(filename_or_obj, backends.AbstractDataStore):
         store = filename_or_obj
         ds = maybe_decode_store(store)
-    elif isinstance(filename_or_obj, basestring):
+    elif isinstance(filename_or_obj, str):
 
         if (isinstance(filename_or_obj, bytes) and
                 filename_or_obj.startswith(b'\x89HDF')):
@@ -310,7 +308,7 @@ def maybe_decode_store(store, lock=False):
                 filename_or_obj.startswith(b'CDF')):
             # netCDF3 file images are handled by scipy
             pass
-        elif isinstance(filename_or_obj, basestring):
+        elif isinstance(filename_or_obj, str):
             filename_or_obj = _normalize_path(filename_or_obj)
 
             if engine is None:
@@ -352,7 +350,7 @@ def maybe_decode_store(store, lock=False):
 
     # Ensure source filename always stored in dataset object (GH issue #2550)
     if 'source' not in ds.encoding:
-        if isinstance(filename_or_obj, basestring):
+        if isinstance(filename_or_obj, str):
             ds.encoding['source'] = filename_or_obj
 
     return ds
@@ -588,15 +586,15 @@ def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
     .. [1] http://xarray.pydata.org/en/stable/dask.html
     .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
     """  # noqa
-    if isinstance(paths, basestring):
+    if isinstance(paths, str):
         if is_remote_uri(paths):
             raise ValueError(
                 'cannot do wild-card matching for paths that are remote URLs: '
                 '{!r}. Instead, supply paths as an explicit list of strings.'
                 .format(paths))
         paths = sorted(glob(paths))
     else:
-        paths = [str(p) if isinstance(p, path_type) else p for p in paths]
+        paths = [str(p) if isinstance(p, Path) else p for p in paths]
 
     if not paths:
         raise IOError('no files to open')
@@ -681,7 +679,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
 
     The ``multifile`` argument is only for the private use of save_mfdataset.
     """
-    if isinstance(path_or_file, path_type):
+    if isinstance(path_or_file, Path):
         path_or_file = str(path_or_file)
 
     if encoding is None:
@@ -698,7 +696,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
         raise NotImplementedError(
             'to_netcdf() with compute=False is not yet implemented when '
             'returning bytes')
-    elif isinstance(path_or_file, basestring):
+    elif isinstance(path_or_file, str):
         if engine is None:
             engine = _get_default_engine(path_or_file)
         path_or_file = _normalize_path(path_or_file)
@@ -733,7 +731,7 @@ def to_netcdf(dataset, path_or_file=None, mode='w', format=None, group=None,
 
     if unlimited_dims is None:
         unlimited_dims = dataset.encoding.get('unlimited_dims', None)
-    if isinstance(unlimited_dims, basestring):
+    if isinstance(unlimited_dims, str):
         unlimited_dims = [unlimited_dims]
 
     writer = ArrayWriter()
@@ -896,7 +894,7 @@ def to_zarr(dataset, store=None, mode='w-', synchronizer=None, group=None,
 
     See `Dataset.to_zarr` for full API docs.
     """
-    if isinstance(store, path_type):
+    if isinstance(store, Path):
         store = str(store)
     if encoding is None:
         encoding = {}
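
The substitution pattern in this file is mechanical: basestring becomes str, and the path_type alias becomes pathlib.Path used directly. A minimal sketch of the resulting py3-only idiom, with a hypothetical open_thing helper standing in for the real entry points:

```python
from pathlib import Path

def open_thing(filename_or_obj):
    # Hypothetical helper, for illustration only.
    # Normalize Path objects to plain strings up front...
    if isinstance(filename_or_obj, Path):
        filename_or_obj = str(filename_or_obj)
    # ...so one isinstance(..., str) check replaces py2's
    # isinstance(..., basestring).
    if isinstance(filename_or_obj, str):
        return open(filename_or_obj, 'rb')
    return filename_or_obj  # assume an already-open file-like object
```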

xarray/backends/cfgrib_.py

Lines changed: 0 additions & 2 deletions
@@ -1,5 +1,3 @@
-from __future__ import absolute_import, division, print_function
-
 import numpy as np
 
 from .. import Variable

xarray/backends/common.py

Lines changed: 7 additions & 8 deletions
@@ -1,16 +1,15 @@
-from __future__ import absolute_import, division, print_function
-
 import logging
 import time
 import traceback
 import warnings
-from collections import Mapping, OrderedDict
+from collections import OrderedDict
+from collections.abc import Mapping
 
 import numpy as np
 
 from ..conventions import cf_encoder
 from ..core import indexing
-from ..core.pycompat import dask_array_type, iteritems
+from ..core.pycompat import dask_array_type
 from ..core.utils import FrozenOrderedDict, NdimSizeLenMixin
 
 # Create a logger object, but don't add any handlers. Leave that to user code.
@@ -109,9 +108,9 @@ class SuffixAppendingDataStore(AbstractDataStore):
     def load(self):
         variables, attributes = AbstractDataStore.load(self)
         variables = {'%s_suffix' % k: v
-                     for k, v in iteritems(variables)}
+                     for k, v in variables.items()}
         attributes = {'%s_suffix' % k: v
-                      for k, v in iteritems(attributes)}
+                      for k, v in attributes.items()}
         return variables, attributes
 
     This function will be called anytime variables or attributes
@@ -275,7 +274,7 @@ def set_attributes(self, attributes):
         attributes : dict-like
             Dictionary of key/value (attribute name / attribute) pairs
         """
-        for k, v in iteritems(attributes):
+        for k, v in attributes.items():
             self.set_attribute(k, v)
 
     def set_variables(self, variables, check_encoding_set, writer,
@@ -297,7 +296,7 @@ def set_variables(self, variables, check_encoding_set, writer,
             dimensions.
         """
 
-        for vn, v in iteritems(variables):
+        for vn, v in variables.items():
             name = _encode_variable_name(vn)
             check = vn in check_encoding_set
             target, source = self.prepare_variable(
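
Two independent cleanups meet here: the abstract base classes such as Mapping live in collections.abc on Python 3 (the aliases in collections were deprecated in 3.3 and removed in 3.10), and dict.items() returns a lazy view, which is exactly what py2's iteritems() provided. A short sketch:

```python
from collections.abc import Mapping  # the py3 home of the ABCs

attrs = {'units': 'K', 'long_name': 'temperature'}
assert isinstance(attrs, Mapping)

# items() is a view, not a copied list, so iterating it is as cheap
# as the old iteritems() shim.
for k, v in attrs.items():
    print(k, v)
```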

xarray/backends/h5netcdf_.py

Lines changed: 6 additions & 8 deletions
@@ -1,12 +1,10 @@
-from __future__ import absolute_import, division, print_function
-
 import functools
+from collections import OrderedDict
 
 import numpy as np
 
 from .. import Variable
 from ..core import indexing
-from ..core.pycompat import OrderedDict, bytes_type, iteritems, unicode_type
 from ..core.utils import FrozenOrderedDict, close_on_error
 from .common import WritableCFDataStore
 from .file_manager import CachingFileManager
@@ -32,7 +30,7 @@ def _getitem(self, key):
 
 
 def maybe_decode_bytes(txt):
-    if isinstance(txt, bytes_type):
+    if isinstance(txt, bytes):
         return txt.decode('utf-8')
     else:
         return txt
@@ -124,7 +122,7 @@ def open_store_variable(self, name, var):
             encoding['original_shape'] = var.shape
 
         vlen_dtype = h5py.check_dtype(vlen=var.dtype)
-        if vlen_dtype is unicode_type:
+        if vlen_dtype is str:
             encoding['dtype'] = str
         elif vlen_dtype is not None:  # pragma: no cover
             # xarray doesn't support writing arbitrary vlen dtypes yet.
@@ -136,7 +134,7 @@ def open_store_variable(self, name, var):
 
     def get_variables(self):
         return FrozenOrderedDict((k, self.open_store_variable(k, v))
-                                 for k, v in iteritems(self.ds.variables))
+                                 for k, v in self.ds.variables.items())
 
     def get_attrs(self):
         return FrozenOrderedDict(_read_attributes(self.ds))
@@ -182,7 +180,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
                 'NC_CHAR type.' % name)
 
         if dtype is str:
-            dtype = h5py.special_dtype(vlen=unicode_type)
+            dtype = h5py.special_dtype(vlen=str)
 
         encoding = _extract_h5nc_encoding(variable,
                                           raise_on_invalid=check_encoding)
@@ -221,7 +219,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
         else:
             nc4_var = self.ds[name]
 
-        for k, v in iteritems(attrs):
+        for k, v in attrs.items():
             nc4_var.attrs[k] = v
 
         target = H5NetCDFArrayWrapper(name, self)
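
With py2 gone, pycompat's bytes_type and unicode_type aliases collapse to the bytes and str built-ins. The maybe_decode_bytes helper above is self-contained; a quick demonstration of its behavior:

```python
def maybe_decode_bytes(txt):
    # Decode raw attribute bytes to text; pass anything else through.
    if isinstance(txt, bytes):
        return txt.decode('utf-8')
    else:
        return txt

print(maybe_decode_bytes(b'degrees_north'))  # -> 'degrees_north'
print(maybe_decode_bytes('already text'))    # -> unchanged
```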

xarray/backends/lru_cache.py

Lines changed: 1 addition & 3 deletions
@@ -1,8 +1,6 @@
 import collections
 import threading
 
-from ..core.pycompat import move_to_end
-
 
 class LRUCache(collections.MutableMapping):
     """Thread-safe LRUCache based on an OrderedDict.
@@ -41,7 +39,7 @@ def __getitem__(self, key):
         # record recent use of the key by moving it to the front of the list
         with self._lock:
             value = self._cache[key]
-            move_to_end(self._cache, key)
+            self._cache.move_to_end(key)
         return value
 
     def _enforce_size_limit(self, capacity):
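
OrderedDict.move_to_end() is available on every Python 3 version xarray supports, so the pycompat shim that emulated it on 2.7 is replaced with a direct method call. A minimal demonstration of the LRU bookkeeping it enables:

```python
from collections import OrderedDict

cache = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
cache.move_to_end('a')     # 'a' was just used: mark it most recently used
print(list(cache))         # ['b', 'c', 'a']
cache.popitem(last=False)  # evict the least recently used entry ('b')
print(list(cache))         # ['c', 'a']
```

Note that collections.MutableMapping in the class statement above is itself a py2-era spelling; on Python 3 the ABC lives in collections.abc, and the old alias was removed in 3.10.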

xarray/backends/memory.py

Lines changed: 1 addition & 3 deletions
@@ -1,10 +1,8 @@
-from __future__ import absolute_import, division, print_function
-
 import copy
+from collections import OrderedDict
 
 import numpy as np
 
-from ..core.pycompat import OrderedDict
 from ..core.variable import Variable
 from .common import AbstractWritableDataStore

xarray/backends/netCDF4_.py

Lines changed: 6 additions & 10 deletions
@@ -1,16 +1,15 @@
-from __future__ import absolute_import, division, print_function
-
 import functools
 import operator
 import warnings
+from collections import OrderedDict
+from contextlib import suppress
 from distutils.version import LooseVersion
 
 import numpy as np
 
 from .. import Variable, coding
 from ..coding.variables import pop_to
 from ..core import indexing
-from ..core.pycompat import PY3, OrderedDict, basestring, iteritems, suppress
 from ..core.utils import FrozenOrderedDict, close_on_error, is_remote_uri
 from .common import (
     BackendArray, WritableCFDataStore, find_root, robust_getitem)
@@ -81,9 +80,6 @@ def _getitem(self, key):
             msg = ('The indexing operation you are attempting to perform '
                    'is not valid on netCDF4.Variable object. Try loading '
                    'your data into memory first by calling .load().')
-            if not PY3:
-                import traceback
-                msg += '\n\nOriginal traceback:\n' + traceback.format_exc()
             raise IndexError(msg)
         return array
 
@@ -141,7 +137,7 @@ def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
         return ds
     else:
         # make sure it's a string
-        if not isinstance(group, basestring):
+        if not isinstance(group, str):
             raise ValueError('group must be a string or None')
         # support path-like syntax
         path = group.strip('/').split('/')
@@ -392,7 +388,7 @@ def open_store_variable(self, name, var):
     def get_variables(self):
         dsvars = FrozenOrderedDict((k, self.open_store_variable(k, v))
                                    for k, v in
-                                   iteritems(self.ds.variables))
+                                   self.ds.variables.items())
         return dsvars
 
     def get_attrs(self):
@@ -402,7 +398,7 @@ def get_attrs(self):
 
     def get_dimensions(self):
         dims = FrozenOrderedDict((k, len(v))
-                                 for k, v in iteritems(self.ds.dimensions))
+                                 for k, v in self.ds.dimensions.items())
         return dims
 
     def get_encoding(self):
@@ -467,7 +463,7 @@ def prepare_variable(self, name, variable, check_encoding=False,
                                            fill_value=fill_value)
         _disable_auto_decode_variable(nc4_var)
 
-        for k, v in iteritems(attrs):
+        for k, v in attrs.items():
             # set attributes one-by-one since netCDF4<1.0.10 can't handle
             # OrderedDict as the input to setncatts
             _set_nc_attribute(nc4_var, k, v)
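
The suppress context manager now comes from its standard-library home, contextlib, where it has lived since Python 3.4, rather than from pycompat. A quick sketch of the idiom:

```python
import os
from contextlib import suppress

# Equivalent to try: ... except FileNotFoundError: pass, in one line.
with suppress(FileNotFoundError):
    os.remove('/tmp/probably-missing.tmp')
```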
