Remove all use of OrderedDict (#5890)
hoxbro authored Oct 3, 2023
1 parent dfc30e7 commit 854e300
Showing 51 changed files with 174 additions and 201 deletions.
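Why the swap is safe: since Python 3.7 the built-in dict preserves insertion order, so replacing OrderedDict with dict keeps key order for the construction, update and iteration patterns touched below. A minimal sketch, assuming nothing beyond that ordering guarantee (illustrative code, not part of the commit):

```python
# Illustrative only -- not HoloViews code. Shows that plain dict matches
# OrderedDict for the patterns replaced in this commit.
from collections import OrderedDict

pairs = [('x', [0, 1]), ('y', [2, 3]), ('z', [4, 5])]

od = OrderedDict(pairs)
d = dict(pairs)

# Same key order and same iteration order of items.
assert list(od) == list(d) == ['x', 'y', 'z']
assert list(od.items()) == list(d.items())

# update() appends previously unseen keys at the end for both types.
d.update(w=[6, 7])
assert list(d)[-1] == 'w'
```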
2 changes: 1 addition & 1 deletion holoviews/__init__.py
@@ -84,7 +84,7 @@
from . import util # noqa (API import)
from .core import archive, config # noqa (API import)
from .core.boundingregion import BoundingBox # noqa (API import)
-from .core.dimension import OrderedDict, Dimension # noqa (API import)
+from .core.dimension import Dimension # noqa (API import)
from .core.element import Element, Collator # noqa (API import)
from .core.layout import (Layout, NdLayout, Empty, # noqa (API import)
AdjointLayout)
3 changes: 1 addition & 2 deletions holoviews/annotators.py
@@ -1,6 +1,5 @@
import sys

-from collections import OrderedDict
from inspect import getmro

import param
@@ -76,7 +75,7 @@ class annotate(param.ParameterizedFunction):
vertex_style = param.Dict(default={'nonselection_alpha': 0.5}, doc="""
Options to apply to vertices during drawing and editing.""")

-_annotator_types = OrderedDict()
+_annotator_types = {}

@property
def annotated(self):
7 changes: 3 additions & 4 deletions holoviews/core/accessors.py
@@ -4,7 +4,6 @@
import copy
import sys

-from collections import OrderedDict
from functools import wraps
from types import FunctionType

@@ -435,7 +434,7 @@ def __call__(self, specs=None, **dimensions):
def dynamic_redim(obj, **dynkwargs):
return obj.redim(specs, **dimensions)
dmap = Dynamic(obj, streams=obj.streams, operation=dynamic_redim)
-dmap.data = OrderedDict(self._filter_cache(redimmed, kdims))
+dmap.data = dict(self._filter_cache(redimmed, kdims))
with util.disable_constant(dmap):
dmap.kdims = kdims
dmap.vdims = vdims
@@ -597,7 +596,7 @@ def info(self, show_defaults=False):

def _holomap_opts(self, *args, clone=None, **kwargs):
apply_groups, _, _ = util.deprecated_opts_signature(args, kwargs)
-data = OrderedDict([(k, v.opts(*args, **kwargs))
+data = dict([(k, v.opts(*args, **kwargs))
for k, v in self._obj.data.items()])

# By default do not clone in .opts method
@@ -623,7 +622,7 @@ def _dynamicmap_opts(self, *args, **kwargs):
obj.callback = self._obj.callback
self._obj.callback = dmap.callback
dmap = self._obj
-dmap.data = OrderedDict([(k, v.opts(*args, **kwargs))
+dmap.data = dict([(k, v.opts(*args, **kwargs))
for k, v in self._obj.data.items()])
return dmap

12 changes: 6 additions & 6 deletions holoviews/core/data/__init__.py
@@ -15,7 +15,7 @@
Dimension, Dimensioned, LabelledData, dimension_name, process_dimensions
)
from ..element import Element
-from ..ndmapping import OrderedDict, MultiDimensionalMapping
+from ..ndmapping import MultiDimensionalMapping
from ..spaces import HoloMap, DynamicMap
from .. import util as core_util

@@ -1019,11 +1019,11 @@ def transform(self, *args, **kwargs):
"""
drop = kwargs.pop('drop', False)
keep_index = kwargs.pop('keep_index', True)
-transforms = OrderedDict()
+transforms = {}
for s, transform in list(args)+list(kwargs.items()):
transforms[core_util.wrap_tuple(s)] = transform

-new_data = OrderedDict()
+new_data = {}
for signature, transform in transforms.items():
applied = transform.apply(
self, compute=False, keep_index=keep_index
@@ -1046,10 +1046,10 @@
if drop:
kdims = [ds.get_dimension(d) for d in new_data if d in ds.kdims]
vdims = [ds.get_dimension(d) or d for d in new_data if d not in ds.kdims]
-data = OrderedDict([(dimension_name(d), values) for d, values in new_data.items()])
+data = dict([(dimension_name(d), values) for d, values in new_data.items()])
return ds.clone(data, kdims=kdims, vdims=vdims)
else:
-new_data = OrderedDict([(dimension_name(d), values) for d, values in new_data.items()])
+new_data = dict([(dimension_name(d), values) for d, values in new_data.items()])
data = ds.interface.assign(ds, new_data)
data, drop = data if isinstance(data, tuple) else (data, [])
kdims = [kd for kd in self.kdims if kd.name not in drop]
@@ -1149,7 +1149,7 @@ def columns(self, dimensions=None):
dimensions = self.dimensions()
else:
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
-return OrderedDict([(d.name, self.dimension_values(d)) for d in dimensions])
+return dict([(d.name, self.dimension_values(d)) for d in dimensions])


@property
4 changes: 2 additions & 2 deletions holoviews/core/data/dask.py
@@ -6,7 +6,7 @@
from .. import util
from ..dimension import Dimension
from ..element import Element
-from ..ndmapping import NdMapping, item_check, OrderedDict, sorted_context
+from ..ndmapping import NdMapping, item_check, sorted_context
from .interface import Interface
from .pandas import PandasInterface

@@ -310,7 +310,7 @@ def iloc(cls, dataset, index):
if np.isscalar(rows):
rows = [rows]

-data = OrderedDict()
+data = {}
for c in cols:
data[c] = dataset.data[c].compute().iloc[rows].values
if scalar:
34 changes: 17 additions & 17 deletions holoviews/core/data/dictionary.py
@@ -1,4 +1,4 @@
-from collections import OrderedDict, defaultdict
+from collections import defaultdict

import numpy as np

@@ -18,7 +18,7 @@ class DictInterface(Interface):
are collections representing the values in that column.
"""

-types = (dict, OrderedDict)
+types = (dict,)

datatype = 'dictionary'

@@ -52,7 +52,7 @@ def init(cls, eltype, data, kdims, vdims):
data = np.atleast_2d(data).T
data = {k: data[:,i] for i,k in enumerate(dimensions)}
elif isinstance(data, list) and data == []:
-data = OrderedDict([(d, []) for d in dimensions])
+data = dict([(d, []) for d in dimensions])
elif isinstance(data, list) and isscalar(data[0]):
if eltype._auto_indexable_1d:
data = {dimensions[0]: np.arange(len(data)), dimensions[1]: data}
@@ -109,10 +109,10 @@ def init(cls, eltype, data, kdims, vdims):

if not cls.expanded([vs for d, vs in unpacked if d in dimensions and not isscalar(vs)]):
raise ValueError('DictInterface expects data to be of uniform shape.')
-if isinstance(data, OrderedDict):
+if isinstance(data, dict):
data.update(unpacked)
else:
-data = OrderedDict(unpacked)
+data = dict(unpacked)

return data, {'kdims':kdims, 'vdims':vdims}, {}

@@ -189,7 +189,7 @@ def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
dim = dimension_name(dimension)
data = list(dataset.data.items())
data.insert(dim_pos, (dim, values))
-return OrderedDict(data)
+return dict(data)

@classmethod
def redim(cls, dataset, dimensions):
@@ -201,7 +201,7 @@ def redim(cls, dataset, dimensions):
elif k in all_dims:
k = dataset.get_dimension(k).name
renamed.append((k, v))
-return OrderedDict(renamed)
+return dict(renamed)


@classmethod
@@ -215,12 +215,12 @@ def concat(cls, datasets, dimensions, vdims):

template = datasets[0][1]
dims = dimensions+template.dimensions()
-return OrderedDict([(d.name, np.concatenate(columns[d.name])) for d in dims])
+return dict([(d.name, np.concatenate(columns[d.name])) for d in dims])


@classmethod
def mask(cls, dataset, mask, mask_value=np.nan):
-masked = OrderedDict(dataset.data)
+masked = dict(dataset.data)
for vd in dataset.vdims:
new_array = np.copy(dataset.data[vd.name])
new_array[mask] = mask_value
@@ -236,7 +236,7 @@ def sort(cls, dataset, by=[], reverse=False):
else:
arrays = [dataset.dimension_values(d) for d in by]
sorting = util.arglexsort(arrays)
-return OrderedDict([(d, v if isscalar(v) else (v[sorting][::-1] if reverse else v[sorting]))
+return dict([(d, v if isscalar(v) else (v[sorting][::-1] if reverse else v[sorting]))
for d, v in dataset.data.items()])


@@ -266,15 +266,15 @@ def values(cls, dataset, dim, expanded=True, flat=True, compute=True, keep_index

@classmethod
def assign(cls, dataset, new_data):
-data = OrderedDict(dataset.data)
+data = dict(dataset.data)
data.update(new_data)
return data


@classmethod
def reindex(cls, dataset, kdims, vdims):
dimensions = [dataset.get_dimension(d).name for d in kdims+vdims]
-return OrderedDict([(d, dataset.dimension_values(d))
+return dict([(d, dataset.dimension_values(d))
for d in dimensions])


@@ -302,7 +302,7 @@ def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
grouped_data = []
for unique_key in util.unique_iterator(keys):
mask = cls.select_mask(dataset, dict(zip(dimensions, unique_key)))
-group_data = OrderedDict((d.name, dataset.data[d.name] if isscalar(dataset.data[d.name])
+group_data = dict((d.name, dataset.data[d.name] if isscalar(dataset.data[d.name])
else dataset.data[d.name][mask])
for d in kdims+vdims)
group_data = group_type(group_data, **group_kwargs)
@@ -325,7 +325,7 @@ def select(cls, dataset, selection_mask=None, **selection):
return {d.name: np.array([], dtype=cls.dtype(dataset, d))
for d in dimensions}
indexed = cls.indexed(dataset, selection)
-data = OrderedDict()
+data = {}
for k, v in dataset.data.items():
if k not in dimensions or isscalar(v):
data[k] = v
@@ -355,8 +355,8 @@ def sample(cls, dataset, samples=[]):
def aggregate(cls, dataset, kdims, function, **kwargs):
kdims = [dataset.get_dimension(d, strict=True).name for d in kdims]
vdims = dataset.dimensions('value', label='name')
-groups = cls.groupby(dataset, kdims, list, OrderedDict)
-aggregated = OrderedDict([(k, []) for k in kdims+vdims])
+groups = cls.groupby(dataset, kdims, list, dict)
+aggregated = dict([(k, []) for k in kdims+vdims])

dropped = []
for key, group in groups:
@@ -394,7 +394,7 @@ def iloc(cls, dataset, index):
if isscalar(rows):
rows = [rows]

-new_data = OrderedDict()
+new_data = {}
for d, values in dataset.data.items():
if d in cols:
if isscalar(values):
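The aggregate hunk above now passes the plain dict type as the groupby container. A standalone sketch of that pattern with made-up rows (not the HoloViews API), grouping values under keys in first-seen order:

```python
# Hypothetical data; mirrors passing `dict` instead of `OrderedDict` as the
# grouping container in the hunk above.
rows = [('a', 1), ('b', 2), ('a', 3)]

groups = {}
for key, value in rows:
    groups.setdefault(key, []).append(value)

# Keys appear in first-seen order, exactly as an OrderedDict would record them.
assert list(groups) == ['a', 'b']
assert groups == {'a': [1, 3], 'b': [2]}
```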
16 changes: 8 additions & 8 deletions holoviews/core/data/grid.py
@@ -1,4 +1,4 @@
-from collections import OrderedDict, defaultdict
+from collections import defaultdict

import numpy as np

@@ -28,7 +28,7 @@ class GridInterface(DictInterface):
longitudes can specify the position of NxM temperature samples.
"""

-types = (dict, OrderedDict)
+types = (dict,)

datatype = 'grid'

@@ -59,9 +59,9 @@ def init(cls, eltype, data, kdims, vdims):
data = {d: v for d, v in zip(dimensions, data)}
elif (isinstance(data, list) and data == []):
if len(kdims) == 1:
-data = OrderedDict([(d, []) for d in dimensions])
+data = dict([(d, []) for d in dimensions])
else:
-data = OrderedDict([(d.name, np.array([])) for d in kdims])
+data = dict([(d.name, np.array([])) for d in kdims])
if len(vdims) == 1:
data[vdims[0].name] = np.zeros((0, 0))
else:
@@ -72,11 +72,11 @@
elif isinstance(data, np.ndarray):
if data.shape == (0, 0) and len(vdims) == 1:
array = data
-data = OrderedDict([(d.name, np.array([])) for d in kdims])
+data = dict([(d.name, np.array([])) for d in kdims])
data[vdims[0].name] = array
elif data.shape == (0, 0, len(vdims)):
array = data
-data = OrderedDict([(d.name, np.array([])) for d in kdims])
+data = dict([(d.name, np.array([])) for d in kdims])
data[vdim_tuple] = array
else:
if data.ndim == 1:
@@ -621,7 +621,7 @@ def select(cls, dataset, selection_mask=None, **selection):
def mask(cls, dataset, mask, mask_val=np.nan):
mask = cls.canonicalize(dataset, mask)
packed = cls.packed(dataset)
-masked = OrderedDict(dataset.data)
+masked = dict(dataset.data)
if packed:
masked = dataset.data[packed].copy()
try:
@@ -804,7 +804,7 @@ def range(cls, dataset, dimension):

@classmethod
def assign(cls, dataset, new_data):
-data = OrderedDict(dataset.data)
+data = dict(dataset.data)
for k, v in new_data.items():
if k in dataset.kdims:
coords = cls.coords(dataset, k)
5 changes: 2 additions & 3 deletions holoviews/core/data/pandas.py
@@ -1,4 +1,3 @@
-from collections import OrderedDict
from packaging.version import Version

import numpy as np
@@ -107,7 +106,7 @@ def init(cls, eltype, data, kdims, vdims):
columns = list(util.unique_iterator([dimension_name(d) for d in kdims+vdims]))

if isinstance(data, dict) and all(c in data for c in columns):
-data = OrderedDict((d, data[d]) for d in columns)
+data = dict((d, data[d]) for d in columns)
elif isinstance(data, list) and len(data) == 0:
data = {c: np.array([]) for c in columns}
elif isinstance(data, (list, dict)) and data in ([], {}):
@@ -122,7 +121,7 @@
"values.")
column_data = zip(*((util.wrap_tuple(k)+util.wrap_tuple(v))
for k, v in column_data))
-data = OrderedDict(((c, col) for c, col in zip(columns, column_data)))
+data = dict(((c, col) for c, col in zip(columns, column_data)))
elif isinstance(data, np.ndarray):
if data.ndim == 1:
if eltype._auto_indexable_1d and len(kdims)+len(vdims)>1:
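In the pandas hunks above the column mapping is now built with a plain dict. A small sketch with made-up columns (not the HoloViews code path), relying only on the fact that pandas keeps the dict's insertion order when building a DataFrame on Python 3.7+:

```python
# Hypothetical columns; shows that a plain dict built in the desired column
# order yields the same DataFrame column order that OrderedDict gave.
import numpy as np
import pandas as pd

columns = ['x', 'y', 'z']
source = {'y': np.arange(3), 'z': np.ones(3), 'x': np.zeros(3)}

data = dict((c, source[c]) for c in columns)  # same pattern as the diff
df = pd.DataFrame(data)

assert list(df.columns) == columns
```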
9 changes: 4 additions & 5 deletions holoviews/core/data/xarray.py
@@ -1,7 +1,6 @@
import sys
import types

-from collections import OrderedDict

import numpy as np
import pandas as pd
@@ -158,7 +157,7 @@ def retrieve_unit_and_label(dim):
for d, values in data.items()}
coord_dims = [data[kd.name].ndim for kd in kdims]
dims = tuple('dim_%d' % i for i in range(max(coord_dims)))[::-1]
-coords = OrderedDict()
+coords = {}
for kd in kdims:
coord_vals = data[kd.name]
if coord_vals.ndim > 1:
@@ -599,7 +598,7 @@ def select(cls, dataset, selection_mask=None, **selection):

# Restore constant dimensions
indexed = cls.indexed(dataset, selection)
-dropped = OrderedDict((d.name, np.atleast_1d(data[d.name]))
+dropped = dict((d.name, np.atleast_1d(data[d.name]))
for d in dataset.kdims
if not data[d.name].data.shape)
if dropped and not indexed:
@@ -671,7 +670,7 @@ def assign(cls, dataset, new_data):
prev_coords = set.intersection(*[
set(var.coords) for var in data.data_vars.values()
])
-coords = OrderedDict()
+coords = {}
for k, v in new_data.items():
if k not in dataset.kdims:
continue
@@ -686,7 +685,7 @@
data = data.assign_coords(**coords)

dims = tuple(kd.name for kd in dataset.kdims[::-1])
-vars = OrderedDict()
+vars = {}
for k, v in new_data.items():
if k in dataset.kdims:
continue