From 67edc1c23fbb2c881249497c8bb8772e20934685 Mon Sep 17 00:00:00 2001
From: Ismael Balafrej
Date: Fri, 21 May 2021 15:51:06 -0400
Subject: [PATCH] Updated setup.py and code formatting for packaging

---
 LICENSE                                   | 21 ++++
 MANIFEST.in                               |  2 -
 README.md                                 | 12 +++--
 ebdataset/audio/ntidigits.py              | 14 ++---
 ebdataset/utils/__init__.py               |  6 ++-
 ebdataset/vision/ibm_gesture.py           | 32 +++--------
 ebdataset/vision/ini_roshambo.py          | 36 ++++------
 ebdataset/vision/nmnist.py                | 10 ++--
 ebdataset/vision/transforms.py            |  8 +--
 ebdataset/vision/type.py                  | 19 ++-----
 ebdataset/visualization/__init__.py       |  0
 .../visualization}/spike_train_to_vid.py  | 28 +++-------
 .../visualization}/time_binning.py        | 29 +++++-----
 pyproject.toml                            |  6 +++
 setup.py                                  | 54 +++++++++++--------
 15 files changed, 125 insertions(+), 152 deletions(-)
 create mode 100644 LICENSE
 delete mode 100644 MANIFEST.in
 create mode 100644 ebdataset/visualization/__init__.py
 rename {visualization => ebdataset/visualization}/spike_train_to_vid.py (71%)
 rename {visualization => ebdataset/visualization}/time_binning.py (70%)
 create mode 100644 pyproject.toml

diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..ee2b29f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Ismael Balafrej
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 1bbfea5..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-recursive-include visualization *
-recursive-include ebdataset *
diff --git a/README.md b/README.md
index 45be9b0..7671c94 100644
--- a/README.md
+++ b/README.md
@@ -6,9 +6,9 @@ An event based dataset loader under one common python (>=3.5) API built on top o
 
 # Supported datasets
 1. Neuromorphic Mnist dataset from
-Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
-“Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades",
-Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at https://www.garrickorchard.com/datasets/n-mnist
+   Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
+   “Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades",
+   Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at https://www.garrickorchard.com/datasets/n-mnist
 2. NCaltech101 dataset from
 Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
 “Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades",
@@ -42,7 +42,7 @@ Frontiers in Neuroscience, vol.9, no.437, Oct. 2015.
 Available for download at h
 # Installation
 You can install the latest version of this package with:
 ```bash
-pip install git+https://github.com/tihbe/python-ebdataset.git
+pip install ebdataset
 ```
 # Getting started
@@ -72,3 +72,7 @@ python -m ebdataset.visualization.spike_train_to_vid NMnist path
 ```
 ![](images/nmnist-2.gif)
 ![](images/nmnist-9.gif)
+
+# Contributing
+
+Feel free to create a pull request if you're interested in this project.
diff --git a/ebdataset/audio/ntidigits.py b/ebdataset/audio/ntidigits.py
index 66a7126..8ef7c23 100644
--- a/ebdataset/audio/ntidigits.py
+++ b/ebdataset/audio/ntidigits.py
@@ -12,9 +12,7 @@ class NTidigits(data.Dataset):
     Available for download at https://docs.google.com/document/d/1Uxe7GsKKXcy6SlDUX4hoJVAC0-UkH-8kr5UXp0Ndi1M
     """
 
-    def __init__(
-        self, path: str, is_train=True, transforms=None, only_single_digits=False
-    ):
+    def __init__(self, path: str, is_train=True, transforms=None, only_single_digits=False):
         assert os.path.exists(path)
         self.prename = "train" if is_train else "test"
         self.path = path
@@ -24,11 +22,7 @@ def __init__(
             self.samples = f[self.prename + "_labels"][()]
 
         if only_single_digits:
-            self.samples = list(
-                filter(
-                    lambda s: len(NTidigits._get_label_for_sample(s)) == 1, self.samples
-                )
-            )
+            self.samples = list(filter(lambda s: len(NTidigits._get_label_for_sample(s)) == 1, self.samples))
 
     @staticmethod
     def _get_label_for_sample(sample_id):
@@ -43,9 +37,7 @@ def __getitem__(self, index):
             addresses = f[self.prename + "_addresses"][sample_id][()]
             ts = f[self.prename + "_timestamps"][sample_id][()]
 
-        sparse_spike_train = np.recarray(
-            shape=len(ts), dtype=[("addr", addresses.dtype), ("ts", ts.dtype)]
-        )
+        sparse_spike_train = np.recarray(shape=len(ts), dtype=[("addr", addresses.dtype), ("ts", ts.dtype)])
         sparse_spike_train.addr = addresses
         sparse_spike_train.ts = ts
 
diff --git a/ebdataset/utils/__init__.py b/ebdataset/utils/__init__.py
index 1d660e6..a7ef6d5 100644
--- a/ebdataset/utils/__init__.py
+++ b/ebdataset/utils/__init__.py
@@ -28,7 +28,11 @@ def unzip(zip_file_path, output_directory, verbose=True, desc="Extracting"):
     with ZipFile(zip_file_path, "r") as zf:
         size = sum((f.file_size for f in zf.infolist()))
         with tqdm(
-            total=size, unit="B", unit_scale=True, desc=desc, disable=not verbose,
+            total=size,
+            unit="B",
+            unit_scale=True,
+            desc=desc,
+            disable=not verbose,
         ) as pbar:
             for file in zf.infolist():
                 if file.is_dir():
diff --git a/ebdataset/vision/ibm_gesture.py b/ebdataset/vision/ibm_gesture.py
index 754bf35..b3df595 100644
--- a/ebdataset/vision/ibm_gesture.py
+++ b/ebdataset/vision/ibm_gesture.py
@@ -61,15 +61,11 @@ def __init__(self, path: str, shuffle: bool = True):
 
         # Read train trials file
         with open(os.path.join(path, self._TRAIN_TRIALS_FILE), "r") as f:
-            self._TRAIN_FILES = map(
-                lambda d: os.path.join(path, d.rstrip()), f.readlines()
-            )
+            self._TRAIN_FILES = map(lambda d: os.path.join(path, d.rstrip()), f.readlines())
 
         # Read test trials file
         with open(os.path.join(path, self._TEST_TRIALS_FILE), "r") as f:
-            self._TEST_FILES = map(
-                lambda d: os.path.join(path, d.rstrip()), f.readlines()
-            )
+            self._TEST_FILES = map(lambda d: os.path.join(path, d.rstrip()), f.readlines())
 
         self._TRAIN_FILES = list(filter(lambda f: os.path.isfile(f), self._TRAIN_FILES))
         self._TEST_FILES = list(filter(lambda f: os.path.isfile(f), self._TEST_FILES))
@@ -80,9 +76,7 @@ def __init__(self, path: str, shuffle: bool = True):
 
     def _read_labels(self, file: str) -> np.array:
         assert os.path.exists(file), "File %s doesn't exist" % file
-        return np.genfromtxt(
-            file, delimiter=",", skip_header=1, dtype=self._LABELS_DTYPE
-        )
+        return np.genfromtxt(file, delimiter=",", skip_header=1, dtype=self._LABELS_DTYPE)
 
     def _parse_filename(self, file: str) -> Tuple[str, str, str]:
         trial = re.search(r"^user([0-9]+)_(.+)\.(aedat|csv)$", file, re.IGNORECASE)
@@ -108,13 +102,9 @@ def _create_generator(self, files: List[str]):
             labels = self._read_labels(file.replace(".aedat", "_labels.csv"))
             multilabel_spike_train = readAEDATv3(file)
             for (label_id, start_time, end_time) in labels:
-                event_mask = (multilabel_spike_train.ts >= start_time) & (
-                    multilabel_spike_train.ts < end_time
-                )
+                event_mask = (multilabel_spike_train.ts >= start_time) & (multilabel_spike_train.ts < end_time)
                 ts = multilabel_spike_train.ts[event_mask] - start_time
-                spike_train = DVSSpikeTrain(
-                    ts.size, width=128, height=128, duration=end_time - start_time + 1
-                )
+                spike_train = DVSSpikeTrain(ts.size, width=128, height=128, duration=end_time - start_time + 1)
                 spike_train.ts = ts
                 spike_train.x = multilabel_spike_train.x[event_mask]
                 spike_train.y = multilabel_spike_train.y[event_mask]
@@ -165,9 +155,7 @@ def __init__(self, path: str, is_train: bool = True):
         """
         _, file_extension = os.path.splitext(path)
         if file_extension != ".h5":
-            raise Exception(
-                "The dvs gesture must first be converted to a .h5 file. Please call H5DvsGesture.Convert"
-            )
+            raise Exception("The dvs gesture must first be converted to a .h5 file. Please call H5DvsGesture.Convert")
 
         self.indx = 0 if is_train else 1
         self.file_path = path
@@ -189,9 +177,7 @@ def convert(dvs_folder_path: str, h5_output_path: str, verbose=True):
         position_type = h5py.vlen_dtype(np.dtype("uint16"))
         time_type = h5py.vlen_dtype(np.dtype("uint32"))
 
-        step_counter = tqdm(
-            total=sum(H5IBMGesture._nb_of_samples), disable=(not verbose)
-        )
+        step_counter = tqdm(total=sum(H5IBMGesture._nb_of_samples), disable=(not verbose))
 
         with h5py.File(h5_output_path, "w-") as f:
             for (name, gen, length) in zip(
@@ -223,9 +209,7 @@ def __getitem__(self, index):
             tos = file_hndl[name + "_tos"][index]
             label = file_hndl[name + "_label"][index]
 
-        spike_train = DVSSpikeTrain(
-            tos.size, width=128, height=128, duration=tos.max() + 1
-        )
+        spike_train = DVSSpikeTrain(tos.size, width=128, height=128, duration=tos.max() + 1)
         spike_train.x = pos[0]
         spike_train.y = pos[1]
         spike_train.p = pos[2]
diff --git a/ebdataset/vision/ini_roshambo.py b/ebdataset/vision/ini_roshambo.py
index 534f20e..74b9eea 100644
--- a/ebdataset/vision/ini_roshambo.py
+++ b/ebdataset/vision/ini_roshambo.py
@@ -30,9 +30,7 @@ def __init__(self, path: str, with_backgrounds=False, transforms=None):
 
         if os.path.isdir(path):  # AEDat v2 directory
             self.backend = "aedat"
-            self.samples = filter(
-                lambda f: os.path.splitext(f)[1] == ".aedat", os.listdir(path)
-            )
+            self.samples = filter(lambda f: os.path.splitext(f)[1] == ".aedat", os.listdir(path))
         elif os.path.splitext(path)[1] == ".h5":
             self.backend = "h5"
             with File(path, "r", libver="latest") as f_hndl:
@@ -54,9 +52,7 @@ def convert(self, out_path, verbose=False):
 
         :return: New Roshambo object with h5 file as backend
         """
-        if (
-            self.backend == "h5"
-        ):  # Send back object if we're already using an h5 backend
+        if self.backend == "h5":  # Send back object if we're already using an h5 backend
             return self
 
         if not (".h5" in out_path):
@@ -64,17 +60,11 @@ def convert(self, out_path, verbose=False):
 
         with File(out_path, "w-", libver="latest") as f_hndl:
             for sample_id in tqdm(self.samples, disable=not verbose):
-                sparse_spike_train = readAEDATv2_davies(
-                    os.path.join(self.path, sample_id)
-                )
-                sparse_spike_train.ts = sparse_spike_train.ts - np.min(
-                    sparse_spike_train.ts
-                )  # Start the sample at t=0
+                sparse_spike_train = readAEDATv2_davies(os.path.join(self.path, sample_id))
+                sparse_spike_train.ts = sparse_spike_train.ts - np.min(sparse_spike_train.ts)  # Start the sample at t=0
                 f_hndl[sample_id] = sparse_spike_train
 
-        return INIRoshambo(
-            out_path, with_backgrounds=self.with_backgrounds, transforms=self.transforms
-        )
+        return INIRoshambo(out_path, with_backgrounds=self.with_backgrounds, transforms=self.transforms)
 
     def split_to_subsamples(self, out_path, duration_per_sample, verbose=False):
         if not (".h5" in out_path):
@@ -96,15 +86,11 @@ def split_to_subsamples(self, out_path, duration_per_sample, verbose=False):
             ):
                 if start_time + duration_per_sample > total_duration:  # End
                     break
-                sub_mask = (sample.ts >= start_time) & (
-                    sample.ts < start_time + duration_per_sample
-                )
+                sub_mask = (sample.ts >= start_time) & (sample.ts < start_time + duration_per_sample)
                 nb_of_spikes = np.sum(sub_mask)
                 if nb_of_spikes <= 10:
                     continue
-                sub_sample = DVSSpikeTrain(
-                    nb_of_spikes, duration=duration_per_sample
-                )
+                sub_sample = DVSSpikeTrain(nb_of_spikes, duration=duration_per_sample)
                 sub_sample.ts = sample.ts[sub_mask]
                 sub_sample.ts = sub_sample.ts - np.min(sub_sample.ts)  # Start at 0
                 sub_sample.x = sample.x[sub_mask]
@@ -121,15 +107,11 @@ def __getitem__(self, index):
         if self.backend == "aedat":
             filename = os.path.join(self.path, sample_id)
             sparse_spike_train = readAEDATv2_davies(filename)
-            sparse_spike_train.ts = sparse_spike_train.ts - np.min(
-                sparse_spike_train.ts
-            )  # Start the sample at t=0
+            sparse_spike_train.ts = sparse_spike_train.ts - np.min(sparse_spike_train.ts)  # Start the sample at t=0
         elif self.backend == "h5":
             with File(self.path, "r", libver="latest") as f_hndl:
                 sparse_spike_train = f_hndl[sample_id][()]
-                sparse_spike_train = np.rec.array(
-                    sparse_spike_train, dtype=sparse_spike_train.dtype
-                ).view(DVSSpikeTrain)
+                sparse_spike_train = np.rec.array(sparse_spike_train, dtype=sparse_spike_train.dtype).view(DVSSpikeTrain)
 
         sparse_spike_train.width = 240
         sparse_spike_train.height = 180
diff --git a/ebdataset/vision/nmnist.py b/ebdataset/vision/nmnist.py
index 51cf866..cea47f7 100644
--- a/ebdataset/vision/nmnist.py
+++ b/ebdataset/vision/nmnist.py
@@ -56,10 +56,12 @@ def _download_and_unzip(self, output_directory):
         test_url = "https://www.dropbox.com/sh/tg2ljlbmtzygrag/AADSKgJ2CjaBWh75HnTNZyhca/Test.zip?dl=1"
         train_loc = os.path.join(output_directory, "Train%i.zip" % time.time())
         test_loc = os.path.join(output_directory, "Test%i.zip" % time.time())
-        success = download(train_url, train_loc, desc="Downloading training files") and \
-            unzip(train_loc, output_directory, desc="Extracting training files") and \
-            download(test_url, test_loc, desc="Downloading test files") and \
-            unzip(test_loc, output_directory, desc="Extracting test files")
+        success = (
+            download(train_url, train_loc, desc="Downloading training files")
+            and unzip(train_loc, output_directory, desc="Extracting training files")
+            and download(test_url, test_loc, desc="Downloading test files")
+            and unzip(test_loc, output_directory, desc="Extracting test files")
+        )
 
         if success:
             os.remove(train_loc)
diff --git a/ebdataset/vision/transforms.py b/ebdataset/vision/transforms.py
index a513f82..4013460 100644
--- a/ebdataset/vision/transforms.py
+++ b/ebdataset/vision/transforms.py
@@ -8,8 +8,7 @@
 
 
 class ScaleDown(object):
-    """Scale down a 2d sparse spike train by factor (both in x and y)
-    """
+    """Scale down a 2d sparse spike train by factor (both in x and y)"""
 
     def __init__(self, width, height, factor):
         self.authorized_x = list(range(0, width, factor))
@@ -65,10 +64,7 @@ def __call__(self, sparse_spike_train):
         ts = (sparse_spike_train.ts * time_scale).astype(int)
 
         dense_spike_train[
-            sparse_spike_train.x.astype(int),
-            sparse_spike_train.y.astype(int),
-            sparse_spike_train.p.astype(int),
-            ts
+            sparse_spike_train.x.astype(int), sparse_spike_train.y.astype(int), sparse_spike_train.p.astype(int), ts
         ] = 1
 
         return dense_spike_train
diff --git a/ebdataset/vision/type.py b/ebdataset/vision/type.py
index e8630a6..25cddd8 100644
--- a/ebdataset/vision/type.py
+++ b/ebdataset/vision/type.py
@@ -1,8 +1,6 @@
 import numpy as np
 
-_dtype = np.dtype(
-    [("x", np.uint16), ("y", np.uint16), ("p", np.bool_), ("ts", np.uint64)]
-)
+_dtype = np.dtype([("x", np.uint16), ("y", np.uint16), ("p", np.bool_), ("ts", np.uint64)])
 
 
 class DVSSpikeTrain(np.recarray):
@@ -10,19 +8,8 @@ class DVSSpikeTrain(np.recarray):
 
     __name__ = "SparseVisionSpikeTrain"
 
-    def __new__(
-        cls,
-        nb_of_spikes,
-        *args,
-        width=-1,
-        height=-1,
-        duration=-1,
-        time_scale=1e-6,
-        **nargs
-    ):
-        obj = super(DVSSpikeTrain, cls).__new__(
-            cls, nb_of_spikes, dtype=_dtype, *args, **nargs
-        )
+    def __new__(cls, nb_of_spikes, *args, width=-1, height=-1, duration=-1, time_scale=1e-6, **nargs):
+        obj = super(DVSSpikeTrain, cls).__new__(cls, nb_of_spikes, dtype=_dtype, *args, **nargs)
         obj.width = width
         obj.height = height
         obj.duration = duration
diff --git a/ebdataset/visualization/__init__.py b/ebdataset/visualization/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/visualization/spike_train_to_vid.py b/ebdataset/visualization/spike_train_to_vid.py
similarity index 71%
rename from visualization/spike_train_to_vid.py
rename to ebdataset/visualization/spike_train_to_vid.py
index 03f4daa..538c6b1 100644
--- a/visualization/spike_train_to_vid.py
+++ b/ebdataset/visualization/spike_train_to_vid.py
@@ -28,16 +28,10 @@
     NMnist,
     PropheseeNCars,
 ]
-dataset_map = dict(
-    zip([dataset.__name__ for dataset in available_datasets], available_datasets)
-)
+dataset_map = dict(zip([dataset.__name__ for dataset in available_datasets], available_datasets))
 
-parser.add_argument(
-    "dataset", help="Dataset - One of [%s]" % " | ".join(dataset_map.keys())
-)
-parser.add_argument(
-    "path", help="Path of the data directory or file for the chosen dataset"
-)
+parser.add_argument("dataset", help="Dataset - One of [%s]" % " | ".join(dataset_map.keys()))
+parser.add_argument("path", help="Path of the data directory or file for the chosen dataset")
 parser.add_argument(
     "-n",
     "--num_samples",
@@ -45,12 +39,8 @@
     type=int,
     default=10,
 )
-parser.add_argument(
-    "-d", "--dilatation", help="Time dilatation scale", type=float, default=1.0
-)  # Default Real time
-parser.add_argument(
-    "-s", "--scale", help="Spatial scaling", type=float, default=1.0
-)  # Default Real size
+parser.add_argument("-d", "--dilatation", help="Time dilatation scale", type=float, default=1.0)  # Default Real time
+parser.add_argument("-s", "--scale", help="Spatial scaling", type=float, default=1.0)  # Default Real size
 
 args = parser.parse_args()
 dataset = dataset_map[args.dataset]
@@ -70,17 +60,13 @@
         int(spike_train.height * spatial_scale),
     )
     out_duration = spike_train.duration * spike_train.time_scale * time_scale
-    out = cv2.VideoWriter(
-        filename, cv2.VideoWriter_fourcc(*"MP42"), 60.0, (out_width, out_height)
-    )
+    out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(*"MP42"), 60.0, (out_width, out_height))
     for frame_start in np.arange(0.0, out_duration, 1 / 60.0):
         frame_end = frame_start + 1 / 60.0
         ts = spike_train.ts * spike_train.time_scale * time_scale
         mask = (ts >= frame_start) & (ts < frame_end)
         frame = np.zeros((out_width, out_height, 3), dtype=np.uint8)
-        for x, y, p in zip(
-            spike_train.x[mask], spike_train.y[mask], spike_train.p[mask]
-        ):
+        for x, y, p in zip(spike_train.x[mask], spike_train.y[mask], spike_train.p[mask]):
             frame[
                 int(x * spatial_scale) : int((x + 1) * spatial_scale),
                 int(y * spatial_scale) : int((y + 1) * spatial_scale),
diff --git a/visualization/time_binning.py b/ebdataset/visualization/time_binning.py
similarity index 70%
rename from visualization/time_binning.py
rename to ebdataset/visualization/time_binning.py
index 8bce191..25687ce 100644
--- a/visualization/time_binning.py
+++ b/ebdataset/visualization/time_binning.py
@@ -13,7 +13,6 @@
     PropheseeNCars,
 )
 from ebdataset.audio import NTidigits
-import seaborn as sns
 
 assert __name__ == "__main__", "This script is meant to be run as main"
 
@@ -30,21 +29,23 @@
     PropheseeNCars,
     NTidigits,
 ]
-dataset_map = dict(
-    zip([dataset.__name__ for dataset in available_datasets], available_datasets)
-)
+dataset_map = dict(zip([dataset.__name__ for dataset in available_datasets], available_datasets))
 
+parser.add_argument("dataset", help="Dataset - One of [%s]" % " | ".join(dataset_map.keys()))
+parser.add_argument("path", help="Path of the data directory or file for the chosen dataset")
 parser.add_argument(
-    "dataset", help="Dataset - One of [%s]" % " | ".join(dataset_map.keys())
-)
-parser.add_argument(
-    "path", help="Path of the data directory or file for the chosen dataset"
-)
-parser.add_argument(
-    "-i", "--id", help="Sample ID", type=int, default=-1,
+    "-i",
+    "--id",
+    help="Sample ID",
+    type=int,
+    default=-1,
 )
 parser.add_argument(
-    "-b", "--bin_size", help="Size of the bin (ms)", type=float, default=1.0,
+    "-b",
+    "--bin_size",
+    help="Size of the bin (ms)",
+    type=float,
+    default=1.0,
 )
 
 args = parser.parse_args()
@@ -60,11 +61,9 @@
 time_scale = getattr(spike_train, "time_scale", 1)
 nb_bins = np.ceil(duration * time_scale * (1000.0 / args.bin_size)).astype(int)
 
-sns.set()
 fig, ax = plt.subplots()
 ax.set_title(
-    "Spike counts binned with size %.2f ms for dataset %s sample #%i"
-    % (args.bin_size, args.dataset, sample_id)
+    "Spike counts binned with size %.2f ms for dataset %s sample #%i" % (args.bin_size, args.dataset, sample_id)
 )
 ax.hist(spike_train.ts * time_scale, bins=nb_bins)
 plt.show()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..b5a3c46
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+    "setuptools>=42",
+    "wheel"
+]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 313fc4a..959623e 100644
--- a/setup.py
+++ b/setup.py
@@ -1,21 +1,33 @@
-from distutils.core import setup
-
-setup(
-    name="ebdataset",
-    version="0.0.1",
-    description="An event based dataset loader under one common API.",
-    author="Ismael Balafrej - NECOTIS",
-    author_email="ismael dot balafrej at usherbrooke dot ca",
-    url="http://www.gel.usherbrooke.ca/necotis/",
-    packages=["ebdataset", "ebdataset.vision", "ebdataset.audio", "ebdataset.vision.parsers", "ebdataset.utils"],
-    install_requires=[
-        "numpy>=1.14.3",
-        "opencv-python>=4.2.0",
-        "quantities>=0.12.4",
-        "tqdm>=4.45.0",
-        "torch>=1.4.0",
-        "torchvision>=0.5.0",
"h5py>=2.10.0", - ], - python_requires=">=3.5.2", -) +from setuptools import setup, find_packages + + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + + +setup( + name="ebdataset", + version="0.0.1", + author="Ismael Balafrej - NECOTIS", + author_email="ismael.balafrej@usherbrooke.ca", + description="An event based dataset loader under one common API.", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/tihbe/python-ebdataset", + packages=find_packages(), + install_requires=[ + "numpy>=1.14.3", + "quantities>=0.12.4", + "tqdm>=4.45.0", + "torch>=1.4.0", + "torchvision>=0.5.0", + "h5py>=2.10.0", + ], + python_requires=">=3.5.2", + classifiers=[ + "Development Status :: 3 - Alpha", + "Programming Language :: Python :: 3 :: Only", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], +)