From 66dd69bb6404eb305c69e7b64cc7d902774a9263 Mon Sep 17 00:00:00 2001
From: Mohinta2892
Date: Thu, 2 Nov 2023 17:37:38 +0000
Subject: [PATCH] black reformatted

---
 docs/source/conf.py                |   2 +-
 examples/cremi/mknet.py            |  54 +++++-------
 examples/cremi/predict.py          |  63 ++++++-------
 examples/cremi/train.py            | 137 +++++++++++++----------
 gunpowder/nodes/batch_provider.py  |   1 -
 gunpowder/nodes/deform_augment.py  |   1 -
 gunpowder/nodes/elastic_augment.py |   1 -
 gunpowder/nodes/noise_augment.py   |   2 --
 gunpowder/nodes/random_location.py |   5 +-
 gunpowder/nodes/random_provider.py |   1 -
 gunpowder/nodes/reject.py          |   1 -
 gunpowder/nodes/shift_augment.py   |   1 -
 gunpowder/nodes/simple_augment.py  |   1 -
 gunpowder/nodes/stack.py           |   1 -
 gunpowder/nodes/zarr_source.py     |   5 +-
 tests/cases/torch_train.py         |   5 +-
 16 files changed, 117 insertions(+), 164 deletions(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 529dbe8b..b0da21cf 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -54,7 +54,7 @@
 ]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # -- Options for HTML output -------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
diff --git a/examples/cremi/mknet.py b/examples/cremi/mknet.py
index aac8c0df..fe13a7a5 100644
--- a/examples/cremi/mknet.py
+++ b/examples/cremi/mknet.py
@@ -2,8 +2,8 @@
 import tensorflow as tf
 import json
 
-def create_network(input_shape, name):
 
+def create_network(input_shape, name):
     tf.reset_default_graph()
 
     # create a placeholder for the 3D raw input tensor
@@ -11,20 +11,17 @@ def create_network(input_shape, name):
 
     # create a U-Net
     raw_batched = tf.reshape(raw, (1, 1) + input_shape)
-    unet_output = unet(raw_batched, 6, 4, [[1,3,3],[1,3,3],[1,3,3]])
+    unet_output = unet(raw_batched, 6, 4, [[1, 3, 3], [1, 3, 3], [1, 3, 3]])
 
     # add a convolution layer to create 3 output maps representing affinities
     # in z, y, and x
     pred_affs_batched = conv_pass(
-        unet_output,
-        kernel_size=1,
-        num_fmaps=3,
-        num_repetitions=1,
-        activation='sigmoid')
+        unet_output, kernel_size=1, num_fmaps=3, num_repetitions=1, activation="sigmoid"
+    )
 
     # get the shape of the output
     output_shape_batched = pred_affs_batched.get_shape().as_list()
-    output_shape = output_shape_batched[1:] # strip the batch dimension
+    output_shape = output_shape_batched[1:]  # strip the batch dimension
 
     # the 4D output tensor (3, depth, height, width)
     pred_affs = tf.reshape(pred_affs_batched, output_shape)
@@ -33,46 +30,39 @@ def create_network(input_shape, name):
     gt_affs = tf.placeholder(tf.float32, shape=output_shape)
 
     # create a placeholder for per-voxel loss weights
-    loss_weights = tf.placeholder(
-        tf.float32,
-        shape=output_shape)
+    loss_weights = tf.placeholder(tf.float32, shape=output_shape)
 
     # compute the loss as the weighted mean squared error between the
     # predicted and the ground-truth affinities
-    loss = tf.losses.mean_squared_error(
-        gt_affs,
-        pred_affs,
-        loss_weights)
+    loss = tf.losses.mean_squared_error(gt_affs, pred_affs, loss_weights)
 
     # use the Adam optimizer to minimize the loss
     opt = tf.train.AdamOptimizer(
-        learning_rate=0.5e-4,
-        beta1=0.95,
-        beta2=0.999,
-        epsilon=1e-8)
+        learning_rate=0.5e-4, beta1=0.95, beta2=0.999, epsilon=1e-8
+    )
     optimizer = opt.minimize(loss)
 
     # store the network in a meta-graph file
-    tf.train.export_meta_graph(filename=name + '.meta')
+    tf.train.export_meta_graph(filename=name + ".meta")
 
     # store network configuration for use in train and predict scripts
     config = {
-        'raw': raw.name,
-        'pred_affs': pred_affs.name,
-        'gt_affs': gt_affs.name,
-        'loss_weights': loss_weights.name,
-        'loss': loss.name,
-        'optimizer': optimizer.name,
-        'input_shape': input_shape,
-        'output_shape': output_shape[1:]
+        "raw": raw.name,
+        "pred_affs": pred_affs.name,
+        "gt_affs": gt_affs.name,
+        "loss_weights": loss_weights.name,
+        "loss": loss.name,
+        "optimizer": optimizer.name,
+        "input_shape": input_shape,
+        "output_shape": output_shape[1:],
     }
-    with open(name + '_config.json', 'w') as f:
+    with open(name + "_config.json", "w") as f:
         json.dump(config, f)
 
-if __name__ == "__main__":
 
+if __name__ == "__main__":
     # create a network for training
-    create_network((84, 268, 268), 'train_net')
+    create_network((84, 268, 268), "train_net")
 
     # create a larger network for faster prediction
-    create_network((120, 322, 322), 'test_net')
+    create_network((120, 322, 322), "test_net")
diff --git a/examples/cremi/predict.py b/examples/cremi/predict.py
index 8693786f..4f229b14 100644
--- a/examples/cremi/predict.py
+++ b/examples/cremi/predict.py
@@ -2,29 +2,29 @@
 import gunpowder as gp
 import json
 
-def predict(iteration):
 
+def predict(iteration):
     ##################
     # DECLARE ARRAYS #
     ##################
 
     # raw intensities
-    raw = gp.ArrayKey('RAW')
+    raw = gp.ArrayKey("RAW")
 
     # the predicted affinities
-    pred_affs = gp.ArrayKey('PRED_AFFS')
+    pred_affs = gp.ArrayKey("PRED_AFFS")
 
     ####################
     # DECLARE REQUESTS #
     ####################
 
-    with open('test_net_config.json', 'r') as f:
+    with open("test_net_config.json", "r") as f:
         net_config = json.load(f)
 
     # get the input and output size in world units (nm, in this case)
     voxel_size = gp.Coordinate((40, 4, 4))
-    input_size = gp.Coordinate(net_config['input_shape'])*voxel_size
-    output_size = gp.Coordinate(net_config['output_shape'])*voxel_size
+    input_size = gp.Coordinate(net_config["input_shape"]) * voxel_size
+    output_size = gp.Coordinate(net_config["output_shape"]) * voxel_size
     context = input_size - output_size
 
     # formulate the request for what a batch should contain
@@ -37,10 +37,8 @@ def predict(iteration):
     #############################
 
     source = gp.Hdf5Source(
-        'sample_A_padded_20160501.hdf',
-        datasets = {
-            raw: 'volumes/raw'
-        })
+        "sample_A_padded_20160501.hdf", datasets={raw: "volumes/raw"}
+    )
 
     # get the ROI provided for raw (we need it later to calculate the ROI in
     # which we can make predictions)
@@ -48,41 +46,35 @@ def predict(iteration):
     raw_roi = source.spec[raw].roi
 
     pipeline = (
-
         # read from HDF5 file
-        source +
-
+        source
+        +
         # convert raw to float in [0, 1]
-        gp.Normalize(raw) +
-
+        gp.Normalize(raw)
+        +
         # perform one prediction for each passing batch (here we use
         # the tensor names earlier stored in test_net_config.json)
         gp.tensorflow.Predict(
-            graph='test_net.meta',
-            checkpoint='train_net_checkpoint_%d'%iteration,
-            inputs={
-                net_config['raw']: raw
-            },
-            outputs={
-                net_config['pred_affs']: pred_affs
-            },
-            array_specs={
-                pred_affs: gp.ArraySpec(roi=raw_roi.grow(-context, -context))
-            }) +
-
+            graph="test_net.meta",
+            checkpoint="train_net_checkpoint_%d" % iteration,
+            inputs={net_config["raw"]: raw},
+            outputs={net_config["pred_affs"]: pred_affs},
+            array_specs={pred_affs: gp.ArraySpec(roi=raw_roi.grow(-context, -context))},
+        )
+        +
         # store all passing batches in the same HDF5 file
         gp.Hdf5Write(
             {
-                raw: '/volumes/raw',
-                pred_affs: '/volumes/pred_affs',
+                raw: "/volumes/raw",
+                pred_affs: "/volumes/pred_affs",
             },
-            output_filename='predictions_sample_A.hdf',
-            compression_type='gzip'
-        ) +
-
+            output_filename="predictions_sample_A.hdf",
+            compression_type="gzip",
+        )
+        +
         # show a summary of time spent in each node every 10 iterations
-        gp.PrintProfilingStats(every=10) +
-
+        gp.PrintProfilingStats(every=10)
+        +
         # iterate over the whole dataset in a scanning fashion, emitting
         # requests that match the size of the network
         gp.Scan(reference=request)
@@ -93,5 +85,6 @@ def predict(iteration):
     # without keeping the complete dataset in memory
     pipeline.request_batch(gp.BatchRequest())
 
+
 if __name__ == "__main__":
     predict(200000)
diff --git a/examples/cremi/train.py b/examples/cremi/train.py
index 8edd12f7..6faf7e50 100644
--- a/examples/cremi/train.py
+++ b/examples/cremi/train.py
@@ -6,41 +6,41 @@
 
 logging.basicConfig(level=logging.INFO)
 
-def train(iterations):
 
+def train(iterations):
     ##################
     # DECLARE ARRAYS #
     ##################
 
     # raw intensities
-    raw = gp.ArrayKey('RAW')
+    raw = gp.ArrayKey("RAW")
 
     # objects labelled with unique IDs
-    gt_labels = gp.ArrayKey('LABELS')
+    gt_labels = gp.ArrayKey("LABELS")
 
     # array of per-voxel affinities to direct neighbors
-    gt_affs= gp.ArrayKey('AFFINITIES')
+    gt_affs = gp.ArrayKey("AFFINITIES")
 
     # weights to use to balance the loss
-    loss_weights = gp.ArrayKey('LOSS_WEIGHTS')
+    loss_weights = gp.ArrayKey("LOSS_WEIGHTS")
 
     # the predicted affinities
-    pred_affs = gp.ArrayKey('PRED_AFFS')
+    pred_affs = gp.ArrayKey("PRED_AFFS")
 
     # the gradient of the loss w.r.t. the predicted affinities
-    pred_affs_gradients = gp.ArrayKey('PRED_AFFS_GRADIENTS')
+    pred_affs_gradients = gp.ArrayKey("PRED_AFFS_GRADIENTS")
 
     ####################
     # DECLARE REQUESTS #
     ####################
 
-    with open('train_net_config.json', 'r') as f:
+    with open("train_net_config.json", "r") as f:
         net_config = json.load(f)
 
     # get the input and output size in world units (nm, in this case)
     voxel_size = gp.Coordinate((40, 4, 4))
-    input_size = gp.Coordinate(net_config['input_shape'])*voxel_size
-    output_size = gp.Coordinate(net_config['output_shape'])*voxel_size
+    input_size = gp.Coordinate(net_config["input_shape"]) * voxel_size
+    output_size = gp.Coordinate(net_config["output_shape"]) * voxel_size
 
     # formulate the request for what a batch should (at least) contain
     request = gp.BatchRequest()
@@ -60,44 +60,38 @@ def train(iterations):
     ##############################
 
     pipeline = (
-
         # a tuple of sources, one for each sample (A, B, and C) provided by the
         # CREMI challenge
        tuple(
-
             # read batches from the HDF5 file
             gp.Hdf5Source(
-                'sample_'+s+'_padded_20160501.hdf',
-                datasets = {
-                    raw: 'volumes/raw',
-                    gt_labels: 'volumes/labels/neuron_ids'
-                }
-            ) +
-
+                "sample_" + s + "_padded_20160501.hdf",
+                datasets={raw: "volumes/raw", gt_labels: "volumes/labels/neuron_ids"},
+            )
+            +
             # convert raw to float in [0, 1]
             gp.Normalize(raw) +
-
             # choose a random location for each requested batch
             gp.RandomLocation()
-
-            for s in ['A', 'B', 'C']
-        ) +
-
+            for s in ["A", "B", "C"]
+        )
+        +
         # choose a random source (i.e., sample) from the above
-        gp.RandomProvider() +
-
+        gp.RandomProvider()
+        +
         # elastically deform the batch
         gp.ElasticAugment(
-            [4,40,40],
-            [0,2,2],
-            [0,math.pi/2.0],
+            [4, 40, 40],
+            [0, 2, 2],
+            [0, math.pi / 2.0],
             prob_slip=0.05,
             prob_shift=0.05,
-            max_misalign=25) +
-
+            max_misalign=25,
+        )
+        +
         # apply transpose and mirror augmentations
-        gp.SimpleAugment(transpose_only=[1, 2]) +
-
+        gp.SimpleAugment(transpose_only=[1, 2])
+        +
         # scale and shift the intensity of the raw array
         gp.IntensityAugment(
             raw,
@@ -105,65 +99,54 @@ def train(iterations):
             scale_max=1.1,
             shift_min=-0.1,
             shift_max=0.1,
-            z_section_wise=True) +
-
+            z_section_wise=True,
+        )
+        +
         # grow a boundary between labels
-        gp.GrowBoundary(
-            gt_labels,
-            steps=3,
-            only_xy=True) +
-
+        gp.GrowBoundary(gt_labels, steps=3, only_xy=True)
+        +
         # convert labels into affinities between voxels
-        gp.AddAffinities(
-            [[-1, 0, 0], [0, -1, 0], [0, 0, -1]],
-            gt_labels,
-            gt_affs) +
-
+        gp.AddAffinities([[-1, 0, 0], [0, -1, 0], [0, 0, -1]], gt_labels, gt_affs)
+        +
         # create a weight array that balances positive and negative samples in
         # the affinity array
-        gp.BalanceLabels(
-            gt_affs,
-            loss_weights) +
-
+        gp.BalanceLabels(gt_affs, loss_weights)
+        +
         # pre-cache batches from the point upstream
-        gp.PreCache(
-            cache_size=10,
-            num_workers=5) +
-
+        gp.PreCache(cache_size=10, num_workers=5)
+        +
         # perform one training iteration for each passing batch (here we use
         # the tensor names earlier stored in train_net.config)
         gp.tensorflow.Train(
-            'train_net',
-            net_config['optimizer'],
-            net_config['loss'],
+            "train_net",
+            net_config["optimizer"],
+            net_config["loss"],
             inputs={
-                net_config['raw']: raw,
-                net_config['gt_affs']: gt_affs,
-                net_config['loss_weights']: loss_weights
+                net_config["raw"]: raw,
+                net_config["gt_affs"]: gt_affs,
+                net_config["loss_weights"]: loss_weights,
             },
-            outputs={
-                net_config['pred_affs']: pred_affs
-            },
-            gradients={
-                net_config['pred_affs']: pred_affs_gradients
-            },
-            save_every=1) +
-
+            outputs={net_config["pred_affs"]: pred_affs},
+            gradients={net_config["pred_affs"]: pred_affs_gradients},
+            save_every=1,
+        )
+        +
         # save the passing batch as an HDF5 file for inspection
         gp.Snapshot(
             {
-                raw: '/volumes/raw',
-                gt_labels: '/volumes/labels/neuron_ids',
-                gt_affs: '/volumes/labels/affs',
-                pred_affs: '/volumes/pred_affs',
-                pred_affs_gradients: '/volumes/pred_affs_gradients'
+                raw: "/volumes/raw",
+                gt_labels: "/volumes/labels/neuron_ids",
+                gt_affs: "/volumes/labels/affs",
+                pred_affs: "/volumes/pred_affs",
+                pred_affs_gradients: "/volumes/pred_affs_gradients",
             },
-            output_dir='snapshots',
-            output_filename='batch_{iteration}.hdf',
+            output_dir="snapshots",
+            output_filename="batch_{iteration}.hdf",
             every=100,
             additional_request=snapshot_request,
-            compression_type='gzip') +
-
+            compression_type="gzip",
+        )
+        +
         # show a summary of time spent in each node every 10 iterations
         gp.PrintProfilingStats(every=10)
     )
@@ -180,6 +163,6 @@ def train(iterations):
 
     print("Finished")
 
+
 if __name__ == "__main__":
     train(200000)
-
\ No newline at end of file
diff --git a/gunpowder/nodes/batch_provider.py b/gunpowder/nodes/batch_provider.py
index dc641c8e..1f6b9dc8 100644
--- a/gunpowder/nodes/batch_provider.py
+++ b/gunpowder/nodes/batch_provider.py
@@ -174,7 +174,6 @@ def request_batch(self, request):
         batch = None
 
         try:
-
             self.set_seeds(request)
 
             logger.debug("%s got request %s", self.name(), request)
diff --git a/gunpowder/nodes/deform_augment.py b/gunpowder/nodes/deform_augment.py
index cdf5eeff..07768293 100644
--- a/gunpowder/nodes/deform_augment.py
+++ b/gunpowder/nodes/deform_augment.py
@@ -129,7 +129,6 @@ def setup(self):
             self.provides(self.transform_key, spec)
 
     def prepare(self, request):
-
         # get the total ROI of all requests
         total_roi = request.get_total_roi()
         logger.debug("total ROI is %s" % total_roi)
diff --git a/gunpowder/nodes/elastic_augment.py b/gunpowder/nodes/elastic_augment.py
index a70f7866..d999d6fe 100644
--- a/gunpowder/nodes/elastic_augment.py
+++ b/gunpowder/nodes/elastic_augment.py
@@ -124,7 +124,6 @@ def __init__(
         self.recompute_missing_points = recompute_missing_points
 
     def prepare(self, request):
-
         # get the voxel size
         self.voxel_size = self.__get_common_voxel_size(request)
 
diff --git a/gunpowder/nodes/noise_augment.py b/gunpowder/nodes/noise_augment.py
index f4bfb5ba..5275a2c0 100644
--- a/gunpowder/nodes/noise_augment.py
+++ b/gunpowder/nodes/noise_augment.py
@@ -57,13 +57,11 @@ def process(self, batch, request):
         seed = request.random_seed
 
         try:
-
             raw.data = skimage.util.random_noise(
                 raw.data, mode=self.mode, rng=seed, clip=self.clip, **self.kwargs
             ).astype(raw.data.dtype)
 
         except ValueError:
-
             # legacy version of skimage random_noise
             raw.data = skimage.util.random_noise(
                 raw.data, mode=self.mode, seed=seed, clip=self.clip, **self.kwargs
diff --git a/gunpowder/nodes/random_location.py b/gunpowder/nodes/random_location.py
index fccbd6cb..d5b6c1e2 100644
--- a/gunpowder/nodes/random_location.py
+++ b/gunpowder/nodes/random_location.py
@@ -172,7 +172,6 @@ def setup(self):
             self.provides(self.random_shift_key, ArraySpec(nonspatial=True))
 
     def prepare(self, request):
-
         logger.debug("request: %s", request.array_specs)
         logger.debug("my spec: %s", self.spec)
 
@@ -383,9 +382,7 @@ def __select_random_location_with_points(
             logger.debug("belongs to lcm voxel %s", lcm_location)
 
             # align the point request ROI with lcm voxel grid
-            lcm_roi = request_points_roi.snap_to_grid(
-                lcm_voxel_size,
-                mode="shrink")
+            lcm_roi = request_points_roi.snap_to_grid(lcm_voxel_size, mode="shrink")
             lcm_roi = lcm_roi / lcm_voxel_size
             logger.debug("Point request ROI: %s", request_points_roi)
             logger.debug("Point request lcm ROI shape: %s", lcm_roi.shape)
diff --git a/gunpowder/nodes/random_provider.py b/gunpowder/nodes/random_provider.py
index dfb086f8..a9ae1081 100644
--- a/gunpowder/nodes/random_provider.py
+++ b/gunpowder/nodes/random_provider.py
@@ -69,7 +69,6 @@ def setup(self):
             self.provides(self.random_provider_key, ArraySpec(nonspatial=True))
 
     def provide(self, request):
-
         if self.random_provider_key is not None:
             del request[self.random_provider_key]
 
diff --git a/gunpowder/nodes/reject.py b/gunpowder/nodes/reject.py
index b6a47436..87bb83aa 100644
--- a/gunpowder/nodes/reject.py
+++ b/gunpowder/nodes/reject.py
@@ -55,7 +55,6 @@ def setup(self):
         self.upstream_provider = self.get_upstream_provider()
 
     def provide(self, request):
-
         report_next_timeout = 10
         num_rejected = 0
 
diff --git a/gunpowder/nodes/shift_augment.py b/gunpowder/nodes/shift_augment.py
index 8fe6524b..8761a563 100644
--- a/gunpowder/nodes/shift_augment.py
+++ b/gunpowder/nodes/shift_augment.py
@@ -24,7 +24,6 @@ def __init__(self, prob_slip=0, prob_shift=0, sigma=0, shift_axis=0):
         self.lcm_voxel_size = None
 
     def prepare(self, request):
-
         self.ndim = request.get_total_roi().dims
 
         assert self.shift_axis in range(self.ndim)
diff --git a/gunpowder/nodes/simple_augment.py b/gunpowder/nodes/simple_augment.py
index dc756e3a..f5a97333 100644
--- a/gunpowder/nodes/simple_augment.py
+++ b/gunpowder/nodes/simple_augment.py
@@ -106,7 +106,6 @@ def setup(self):
                 self.permutation_dict[k] = v
 
     def prepare(self, request):
-
         self.mirror = [
             random.random() < self.mirror_probs[d] if self.mirror_mask[d] else 0
             for d in range(self.dims)
diff --git a/gunpowder/nodes/stack.py b/gunpowder/nodes/stack.py
index 21f53acc..5d7feabd 100644
--- a/gunpowder/nodes/stack.py
+++ b/gunpowder/nodes/stack.py
@@ -25,7 +25,6 @@ def __init__(self, num_repetitions):
         self.num_repetitions = num_repetitions
 
     def provide(self, request):
-
         batches = []
         for _ in range(self.num_repetitions):
             upstream_request = request.copy()
diff --git a/gunpowder/nodes/zarr_source.py b/gunpowder/nodes/zarr_source.py
index 812769f3..b7133580 100644
--- a/gunpowder/nodes/zarr_source.py
+++ b/gunpowder/nodes/zarr_source.py
@@ -107,9 +107,8 @@ def _get_offset(self, dataset):
 
     def _rev_metadata(self):
         with ZarrFile(self.store, mode="a") as store:
-            return (
-                isinstance(store.chunk_store, N5Store) or
-                isinstance(store.chunk_store, N5FSStore)
+            return isinstance(store.chunk_store, N5Store) or isinstance(
+                store.chunk_store, N5FSStore
             )
 
     def _open_file(self, store):
diff --git a/tests/cases/torch_train.py b/tests/cases/torch_train.py
index bdcc612f..07c04e53 100644
--- a/tests/cases/torch_train.py
+++ b/tests/cases/torch_train.py
@@ -203,7 +203,7 @@ def forward(self, a, b):
         checkpoint_basename=checkpoint_basename,
         save_every=100,
         spawn_subprocess=True,
-        device='cuda:0'
+        device="cuda:0",
     )
     pipeline = source + train
 
@@ -348,7 +348,7 @@ def forward(self, a, b):
             d_pred: ArraySpec(nonspatial=True),
         },
         spawn_subprocess=True,
-        device="cuda:0"
+        device="cuda:0",
     )
     pipeline = source + predict
 
@@ -373,6 +373,7 @@ def forward(self, a, b):
 
 
 if not isinstance(torch, NoSuchModule):
+
     class ExampleModel(torch.nn.Module):
         def __init__(self):
             super(ExampleModel, self).__init__()