Commit

Updated setup.py and code formatting for packaging

Ismael Balafrej committed May 21, 2021
1 parent a45b43a commit 67edc1c

Showing 15 changed files with 125 additions and 152 deletions.
21 changes: 21 additions & 0 deletions LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021 Ismael Balafrej

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
2 changes: 0 additions & 2 deletions MANIFEST.in

This file was deleted.

12 changes: 8 additions & 4 deletions README.md
@@ -6,9 +6,9 @@ An event based dataset loader under one common python (>=3.5) API built on top o
# Supported datasets

1. Neuromorphic Mnist dataset from
Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
“Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades",
Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at https://www.garrickorchard.com/datasets/n-mnist
Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
“Converting Static Image Datasets to Spiking Neuromorphic Datasets Using Saccades",
Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at https://www.garrickorchard.com/datasets/n-mnist

2. NCaltech101 dataset from
Orchard, G.; Cohen, G.; Jayawant, A.; and Thakor, N.
@@ -42,7 +42,7 @@ Frontiers in Neuroscience, vol.9, no.437, Oct. 2015. Available for download at h
# Installation
You can install the latest version of this package with:
```bash
pip install git+https://github.com/tihbe/python-ebdataset.git
pip install ebdataset
```

# Getting started
@@ -72,3 +72,7 @@ python -m ebdataset.visualization.spike_train_to_vid NMnist path
```

![](images/nmnist-2.gif) ![](images/nmnist-9.gif)

# Contributing

Feel free to create a pull request if you're interested in this project.
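
The README's "Getting started" section is collapsed in this diff. For orientation, a minimal sketch of what loading a dataset looks like; the import path, constructor arguments, and `(spike_train, label)` return shape are assumptions inferred from the loaders elsewhere in this commit, not shown here:

```python
from ebdataset.vision import NMnist  # import path assumed from this repository's layout

# Each item is expected to be a sparse DVSSpikeTrain record array with
# x, y, p and ts fields, plus the digit label (return shape assumed).
dataset = NMnist("path/to/N-MNIST")
spike_train, label = dataset[0]
print(spike_train.ts[:10], label)
```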
14 changes: 3 additions & 11 deletions ebdataset/audio/ntidigits.py
@@ -12,9 +12,7 @@ class NTidigits(data.Dataset):
Available for download at https://docs.google.com/document/d/1Uxe7GsKKXcy6SlDUX4hoJVAC0-UkH-8kr5UXp0Ndi1M
"""

def __init__(
self, path: str, is_train=True, transforms=None, only_single_digits=False
):
def __init__(self, path: str, is_train=True, transforms=None, only_single_digits=False):
assert os.path.exists(path)
self.prename = "train" if is_train else "test"
self.path = path
@@ -24,11 +22,7 @@ def __init__(
self.samples = f[self.prename + "_labels"][()]

if only_single_digits:
self.samples = list(
filter(
lambda s: len(NTidigits._get_label_for_sample(s)) == 1, self.samples
)
)
self.samples = list(filter(lambda s: len(NTidigits._get_label_for_sample(s)) == 1, self.samples))

@staticmethod
def _get_label_for_sample(sample_id):
@@ -43,9 +37,7 @@ def __getitem__(self, index):
addresses = f[self.prename + "_addresses"][sample_id][()]
ts = f[self.prename + "_timestamps"][sample_id][()]

sparse_spike_train = np.recarray(
shape=len(ts), dtype=[("addr", addresses.dtype), ("ts", ts.dtype)]
)
sparse_spike_train = np.recarray(shape=len(ts), dtype=[("addr", addresses.dtype), ("ts", ts.dtype)])
sparse_spike_train.addr = addresses
sparse_spike_train.ts = ts

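A usage sketch for the constructor reformatted above; the file name and import path are placeholders, and whether `__getitem__` also returns the label is not visible in this diff:

```python
from ebdataset.audio import NTidigits  # import path assumed

# Keep only single-digit utterances from the training split; the filter in
# __init__ above drops samples whose label string is longer than one digit.
train_set = NTidigits("path/to/n-tidigits.hdf5", is_train=True, only_single_digits=True)

# __getitem__ builds a record array pairing cochlea addresses with timestamps.
sparse_spike_train = train_set[0]
```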
6 changes: 5 additions & 1 deletion ebdataset/utils/__init__.py
@@ -28,7 +28,11 @@ def unzip(zip_file_path, output_directory, verbose=True, desc="Extracting"):
with ZipFile(zip_file_path, "r") as zf:
size = sum((f.file_size for f in zf.infolist()))
with tqdm(
total=size, unit="B", unit_scale=True, desc=desc, disable=not verbose,
total=size,
unit="B",
unit_scale=True,
desc=desc,
disable=not verbose,
) as pbar:
for file in zf.infolist():
if file.is_dir():
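The `unzip` helper's signature is unchanged by this reformat; a quick sketch of calling it directly (paths are placeholders):

```python
from ebdataset.utils import unzip  # defined in ebdataset/utils/__init__.py as shown above

# Extract with a byte-accurate tqdm progress bar sized from the zip index.
unzip("path/to/Train.zip", "path/to/output_directory", desc="Extracting training files")
```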
32 changes: 8 additions & 24 deletions ebdataset/vision/ibm_gesture.py
@@ -61,15 +61,11 @@ def __init__(self, path: str, shuffle: bool = True):

# Read train trials file
with open(os.path.join(path, self._TRAIN_TRIALS_FILE), "r") as f:
self._TRAIN_FILES = map(
lambda d: os.path.join(path, d.rstrip()), f.readlines()
)
self._TRAIN_FILES = map(lambda d: os.path.join(path, d.rstrip()), f.readlines())

# Read test trials file
with open(os.path.join(path, self._TEST_TRIALS_FILE), "r") as f:
self._TEST_FILES = map(
lambda d: os.path.join(path, d.rstrip()), f.readlines()
)
self._TEST_FILES = map(lambda d: os.path.join(path, d.rstrip()), f.readlines())

self._TRAIN_FILES = list(filter(lambda f: os.path.isfile(f), self._TRAIN_FILES))
self._TEST_FILES = list(filter(lambda f: os.path.isfile(f), self._TEST_FILES))
@@ -80,9 +76,7 @@ def __init__(self, path: str, shuffle: bool = True):

def _read_labels(self, file: str) -> np.array:
assert os.path.exists(file), "File %s doesn't exist" % file
return np.genfromtxt(
file, delimiter=",", skip_header=1, dtype=self._LABELS_DTYPE
)
return np.genfromtxt(file, delimiter=",", skip_header=1, dtype=self._LABELS_DTYPE)

def _parse_filename(self, file: str) -> Tuple[str, str, str]:
trial = re.search(r"^user([0-9]+)_(.+)\.(aedat|csv)$", file, re.IGNORECASE)
@@ -108,13 +102,9 @@ def _create_generator(self, files: List[str]):
labels = self._read_labels(file.replace(".aedat", "_labels.csv"))
multilabel_spike_train = readAEDATv3(file)
for (label_id, start_time, end_time) in labels:
event_mask = (multilabel_spike_train.ts >= start_time) & (
multilabel_spike_train.ts < end_time
)
event_mask = (multilabel_spike_train.ts >= start_time) & (multilabel_spike_train.ts < end_time)
ts = multilabel_spike_train.ts[event_mask] - start_time
spike_train = DVSSpikeTrain(
ts.size, width=128, height=128, duration=end_time - start_time + 1
)
spike_train = DVSSpikeTrain(ts.size, width=128, height=128, duration=end_time - start_time + 1)
spike_train.ts = ts
spike_train.x = multilabel_spike_train.x[event_mask]
spike_train.y = multilabel_spike_train.y[event_mask]
@@ -165,9 +155,7 @@ def __init__(self, path: str, is_train: bool = True):
"""
_, file_extension = os.path.splitext(path)
if file_extension != ".h5":
raise Exception(
"The dvs gesture must first be converted to a .h5 file. Please call H5DvsGesture.Convert"
)
raise Exception("The dvs gesture must first be converted to a .h5 file. Please call H5DvsGesture.Convert")

self.indx = 0 if is_train else 1
self.file_path = path
@@ -189,9 +177,7 @@ def convert(dvs_folder_path: str, h5_output_path: str, verbose=True):
position_type = h5py.vlen_dtype(np.dtype("uint16"))
time_type = h5py.vlen_dtype(np.dtype("uint32"))

step_counter = tqdm(
total=sum(H5IBMGesture._nb_of_samples), disable=(not verbose)
)
step_counter = tqdm(total=sum(H5IBMGesture._nb_of_samples), disable=(not verbose))

with h5py.File(h5_output_path, "w-") as f:
for (name, gen, length) in zip(
@@ -223,9 +209,7 @@ def __getitem__(self, index):
tos = file_hndl[name + "_tos"][index]
label = file_hndl[name + "_label"][index]

spike_train = DVSSpikeTrain(
tos.size, width=128, height=128, duration=tos.max() + 1
)
spike_train = DVSSpikeTrain(tos.size, width=128, height=128, duration=tos.max() + 1)
spike_train.x = pos[0]
spike_train.y = pos[1]
spike_train.p = pos[2]
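Both `H5IBMGesture` entry points keep their signatures through this reformat; a sketch of the convert-then-load flow, with paths and the `(spike_train, label)` return shape as assumptions:

```python
from ebdataset.vision import H5IBMGesture  # import path assumed

# One-time conversion of the raw DvsGesture download into a single .h5 file,
# as required by the extension check in __init__ above.
H5IBMGesture.convert("path/to/DvsGesture", "path/to/dvs_gesture.h5")

# The .h5 file then backs both splits; __getitem__ rebuilds a 128x128
# DVSSpikeTrain from the stored positions and timestamps.
train_set = H5IBMGesture("path/to/dvs_gesture.h5", is_train=True)
spike_train, label = train_set[0]  # return shape assumed
```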
36 changes: 9 additions & 27 deletions ebdataset/vision/ini_roshambo.py
@@ -30,9 +30,7 @@ def __init__(self, path: str, with_backgrounds=False, transforms=None):

if os.path.isdir(path): # AEDat v2 directory
self.backend = "aedat"
self.samples = filter(
lambda f: os.path.splitext(f)[1] == ".aedat", os.listdir(path)
)
self.samples = filter(lambda f: os.path.splitext(f)[1] == ".aedat", os.listdir(path))
elif os.path.splitext(path)[1] == ".h5":
self.backend = "h5"
with File(path, "r", libver="latest") as f_hndl:
@@ -54,27 +52,19 @@ def convert(self, out_path, verbose=False):
:return: New Roshambo object with h5 file as backend
"""

if (
self.backend == "h5"
): # Send back object if we're already using an h5 backend
if self.backend == "h5": # Send back object if we're already using an h5 backend
return self

if not (".h5" in out_path):
out_path += ".h5"

with File(out_path, "w-", libver="latest") as f_hndl:
for sample_id in tqdm(self.samples, disable=not verbose):
sparse_spike_train = readAEDATv2_davies(
os.path.join(self.path, sample_id)
)
sparse_spike_train.ts = sparse_spike_train.ts - np.min(
sparse_spike_train.ts
) # Start the sample at t=0
sparse_spike_train = readAEDATv2_davies(os.path.join(self.path, sample_id))
sparse_spike_train.ts = sparse_spike_train.ts - np.min(sparse_spike_train.ts) # Start the sample at t=0
f_hndl[sample_id] = sparse_spike_train

return INIRoshambo(
out_path, with_backgrounds=self.with_backgrounds, transforms=self.transforms
)
return INIRoshambo(out_path, with_backgrounds=self.with_backgrounds, transforms=self.transforms)

def split_to_subsamples(self, out_path, duration_per_sample, verbose=False):
if not (".h5" in out_path):
@@ -96,15 +86,11 @@ def split_to_subsamples(self, out_path, duration_per_sample, verbose=False):
):
if start_time + duration_per_sample > total_duration: # End
break
sub_mask = (sample.ts >= start_time) & (
sample.ts < start_time + duration_per_sample
)
sub_mask = (sample.ts >= start_time) & (sample.ts < start_time + duration_per_sample)
nb_of_spikes = np.sum(sub_mask)
if nb_of_spikes <= 10:
continue
sub_sample = DVSSpikeTrain(
nb_of_spikes, duration=duration_per_sample
)
sub_sample = DVSSpikeTrain(nb_of_spikes, duration=duration_per_sample)
sub_sample.ts = sample.ts[sub_mask]
sub_sample.ts = sub_sample.ts - np.min(sub_sample.ts) # Start at 0
sub_sample.x = sample.x[sub_mask]
Expand All @@ -121,15 +107,11 @@ def __getitem__(self, index):
if self.backend == "aedat":
filename = os.path.join(self.path, sample_id)
sparse_spike_train = readAEDATv2_davies(filename)
sparse_spike_train.ts = sparse_spike_train.ts - np.min(
sparse_spike_train.ts
) # Start the sample at t=0
sparse_spike_train.ts = sparse_spike_train.ts - np.min(sparse_spike_train.ts) # Start the sample at t=0
elif self.backend == "h5":
with File(self.path, "r", libver="latest") as f_hndl:
sparse_spike_train = f_hndl[sample_id][()]
sparse_spike_train = np.rec.array(
sparse_spike_train, dtype=sparse_spike_train.dtype
).view(DVSSpikeTrain)
sparse_spike_train = np.rec.array(sparse_spike_train, dtype=sparse_spike_train.dtype).view(DVSSpikeTrain)

sparse_spike_train.width = 240
sparse_spike_train.height = 180
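A sketch tying together the three `INIRoshambo` code paths shown above. Paths are placeholders, the import path is assumed, and timestamps are treated as microseconds per `DVSSpikeTrain`'s default `time_scale` of 1e-6, which is also an assumption:

```python
from ebdataset.vision import INIRoshambo  # import path assumed

# Start from the directory of AEDAT v2 recordings, then convert once to an
# h5 backend; convert() returns a new INIRoshambo over the written file.
raw = INIRoshambo("path/to/Roshambo_recordings")
fast = raw.convert("path/to/roshambo.h5", verbose=True)

# Cut every recording into fixed-duration clips, skipping clips with <= 10
# spikes as in split_to_subsamples above (500 ms, assuming microsecond ts).
fast.split_to_subsamples("path/to/roshambo_500ms.h5", duration_per_sample=500000, verbose=True)
```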
10 changes: 6 additions & 4 deletions ebdataset/vision/nmnist.py
@@ -56,10 +56,12 @@ def _download_and_unzip(self, output_directory):
test_url = "https://www.dropbox.com/sh/tg2ljlbmtzygrag/AADSKgJ2CjaBWh75HnTNZyhca/Test.zip?dl=1"
train_loc = os.path.join(output_directory, "Train%i.zip" % time.time())
test_loc = os.path.join(output_directory, "Test%i.zip" % time.time())
success = download(train_url, train_loc, desc="Downloading training files") and \
unzip(train_loc, output_directory, desc="Extracting training files") and \
download(test_url, test_loc, desc="Downloading test files") and \
unzip(test_loc, output_directory, desc="Extracting test files")
success = (
download(train_url, train_loc, desc="Downloading training files")
and unzip(train_loc, output_directory, desc="Extracting training files")
and download(test_url, test_loc, desc="Downloading test files")
and unzip(test_loc, output_directory, desc="Extracting test files")
)

if success:
os.remove(train_loc)
8 changes: 2 additions & 6 deletions ebdataset/vision/transforms.py
@@ -8,8 +8,7 @@


class ScaleDown(object):
"""Scale down a 2d sparse spike train by factor (both in x and y)
"""
"""Scale down a 2d sparse spike train by factor (both in x and y)"""

def __init__(self, width, height, factor):
self.authorized_x = list(range(0, width, factor))
@@ -65,10 +64,7 @@ def __call__(self, sparse_spike_train):
ts = (sparse_spike_train.ts * time_scale).astype(int)

dense_spike_train[
sparse_spike_train.x.astype(int),
sparse_spike_train.y.astype(int),
sparse_spike_train.p.astype(int),
ts
sparse_spike_train.x.astype(int), sparse_spike_train.y.astype(int), sparse_spike_train.p.astype(int), ts
] = 1

return dense_spike_train
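A sketch of applying `ScaleDown`, assuming it follows the same callable `__call__` convention as the dense transform above; the 128x128 input size and the `spike_train` variable are placeholders:

```python
from ebdataset.vision.transforms import ScaleDown  # import path assumed

# Keep only events whose x/y fall on the stride grid built in __init__ above
# (range(0, width, factor)), shrinking a 128x128 train by a factor of 2.
scale_down = ScaleDown(width=128, height=128, factor=2)
smaller_train = scale_down(spike_train)  # spike_train: a DVSSpikeTrain
```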
19 changes: 3 additions & 16 deletions ebdataset/vision/type.py
@@ -1,28 +1,15 @@
import numpy as np

_dtype = np.dtype(
[("x", np.uint16), ("y", np.uint16), ("p", np.bool_), ("ts", np.uint64)]
)
_dtype = np.dtype([("x", np.uint16), ("y", np.uint16), ("p", np.bool_), ("ts", np.uint64)])


class DVSSpikeTrain(np.recarray):
"""Common type for event based vision datasets"""

__name__ = "SparseVisionSpikeTrain"

def __new__(
cls,
nb_of_spikes,
*args,
width=-1,
height=-1,
duration=-1,
time_scale=1e-6,
**nargs
):
obj = super(DVSSpikeTrain, cls).__new__(
cls, nb_of_spikes, dtype=_dtype, *args, **nargs
)
def __new__(cls, nb_of_spikes, *args, width=-1, height=-1, duration=-1, time_scale=1e-6, **nargs):
obj = super(DVSSpikeTrain, cls).__new__(cls, nb_of_spikes, dtype=_dtype, *args, **nargs)
obj.width = width
obj.height = height
obj.duration = duration
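The condensed `__new__` above keeps `width`, `height`, `duration`, and `time_scale` as keyword metadata alongside the structured fields; constructing a small train by hand (values are illustrative, import path assumed):

```python
import numpy as np

from ebdataset.vision.type import DVSSpikeTrain  # import path assumed

# Three events; x/y/p/ts live in the record array defined by _dtype above,
# while width/height/duration/time_scale ride along as plain attributes.
train = DVSSpikeTrain(3, width=128, height=128, duration=300, time_scale=1e-6)
train.x = np.array([10, 20, 30], dtype=np.uint16)
train.y = np.array([5, 15, 25], dtype=np.uint16)
train.p = np.array([True, False, True])
train.ts = np.array([0, 100, 200], dtype=np.uint64)
```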
Empty file.
ebdataset/visualization/spike_train_to_vid.py
@@ -28,29 +28,19 @@
NMnist,
PropheseeNCars,
]
dataset_map = dict(
zip([dataset.__name__ for dataset in available_datasets], available_datasets)
)
dataset_map = dict(zip([dataset.__name__ for dataset in available_datasets], available_datasets))

parser.add_argument(
"dataset", help="Dataset - One of [%s]" % " | ".join(dataset_map.keys())
)
parser.add_argument(
"path", help="Path of the data directory or file for the chosen dataset"
)
parser.add_argument("dataset", help="Dataset - One of [%s]" % " | ".join(dataset_map.keys()))
parser.add_argument("path", help="Path of the data directory or file for the chosen dataset")
parser.add_argument(
"-n",
"--num_samples",
help="Number of video samples to generate",
type=int,
default=10,
)
parser.add_argument(
"-d", "--dilatation", help="Time dilatation scale", type=float, default=1.0
) # Default Real time
parser.add_argument(
"-s", "--scale", help="Spatial scaling", type=float, default=1.0
) # Default Real size
parser.add_argument("-d", "--dilatation", help="Time dilatation scale", type=float, default=1.0) # Default Real time
parser.add_argument("-s", "--scale", help="Spatial scaling", type=float, default=1.0) # Default Real size

args = parser.parse_args()
dataset = dataset_map[args.dataset]
@@ -70,17 +60,13 @@
int(spike_train.height * spatial_scale),
)
out_duration = spike_train.duration * spike_train.time_scale * time_scale
out = cv2.VideoWriter(
filename, cv2.VideoWriter_fourcc(*"MP42"), 60.0, (out_width, out_height)
)
out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(*"MP42"), 60.0, (out_width, out_height))
for frame_start in np.arange(0.0, out_duration, 1 / 60.0):
frame_end = frame_start + 1 / 60.0
ts = spike_train.ts * spike_train.time_scale * time_scale
mask = (ts >= frame_start) & (ts < frame_end)
frame = np.zeros((out_width, out_height, 3), dtype=np.uint8)
for x, y, p in zip(
spike_train.x[mask], spike_train.y[mask], spike_train.p[mask]
):
for x, y, p in zip(spike_train.x[mask], spike_train.y[mask], spike_train.p[mask]):
frame[
int(x * spatial_scale) : int((x + 1) * spatial_scale),
int(y * spatial_scale) : int((y + 1) * spatial_scale),