From 93a8ccc75ebea809ae9445bdea8a53e9b7496ba5 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 28 Oct 2019 16:25:22 +0100 Subject: [PATCH 01/40] Added dndarray property 'stride' (same as torch.Tensor.stride). --- heat/core/dndarray.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/heat/core/dndarray.py b/heat/core/dndarray.py index a4526a6ffe..ecce72abcf 100644 --- a/heat/core/dndarray.py +++ b/heat/core/dndarray.py @@ -165,6 +165,15 @@ def split(self): int : the axis on which the tensor split """ return self.__split + + @property + def stride(self): + """ + Returns + ------- + tuple of ints: steps in each dimension when traversing the tensor on each node + """ + return self.__array.stride @property def T(self, axes=None): From c2faa472cd24878d1128d2a89a980abb415428da Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Wed, 20 Nov 2019 09:07:24 +0100 Subject: [PATCH 02/40] Implemented tensor property strides (numpy-like), added to docs. --- heat/core/dndarray.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/heat/core/dndarray.py b/heat/core/dndarray.py index 3e4d124ac3..c518849323 100644 --- a/heat/core/dndarray.py +++ b/heat/core/dndarray.py @@ -171,10 +171,21 @@ def stride(self): """ Returns ------- - tuple of ints: steps in each dimension when traversing the tensor on each node + tuple of ints: steps in each dimension when traversing a tensor. + torch-like usage: self.stride() """ return self.__array.stride + @property + def strides(self): + """ + Returns + ------- + tuple of ints: bytes to step in each dimension when traversing a tensor. + numpy-like usage: self.strides + """ + return self.numpy().strides + @property def T(self, axes=None): return linalg.transpose(self, axes) From 7c12fe55a24fcae0f2ee7b84b4966aa2c5e295e4 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 22 Nov 2019 16:41:41 +0100 Subject: [PATCH 03/40] First pass of column-first memory layout, single-node only. --- heat/core/factories.py | 43 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/heat/core/factories.py b/heat/core/factories.py index 6d574af067..c3744d3119 100644 --- a/heat/core/factories.py +++ b/heat/core/factories.py @@ -131,7 +131,17 @@ def arange(*args, dtype=None, split=None, device=None, comm=None): return dndarray.DNDarray(data, gshape, htype, split, device, comm) -def array(obj, dtype=None, copy=True, ndmin=0, split=None, is_split=None, device=None, comm=None): +def array( + obj, + dtype=None, + copy=True, + ndmin=0, + order="C", + split=None, + is_split=None, + device=None, + comm=None, +): """ Create a tensor. Parameters @@ -149,6 +159,17 @@ def array(obj, dtype=None, copy=True, ndmin=0, split=None, is_split=None, device ndmin : int, optional Specifies the minimum number of dimensions that the resulting array should have. Ones will, if needed, be attached to the shape if ndim>0 and prefaced in case of ndim<0 to meet the requirement. + order: str, optional + #TODO: make sure all options are covered + Specify the memory layout of the array. If object is not an array, the newly created array will be in C order (row major) unless ‘F’ is specified, in which case it will be in Fortran order (column major). If object is an array the following holds. 
+ order no copy copy=True + #TODO‘K’ unchanged F & C order preserved, otherwise most similar order + #TODO‘A’ unchanged F order if input is F and not C, otherwise C order + #TODO‘C’ C order C order + #TODO‘F’ F order F order + + #TODO: When copy=False and a copy is made for other reasons, the result is the same as if copy=True, with some exceptions for A, see the Notes section. The default order is ‘K’. + split : None or int, optional The axis along which the passed array content obj is split and distributed in memory. Mutually exclusive with is_split. @@ -198,6 +219,8 @@ def array(obj, dtype=None, copy=True, ndmin=0, split=None, is_split=None, device (1/2) >>> ht.array([3, 4], is_split=0) (0/2) tensor([1, 2, 3, 4]) (1/2) tensor([1, 2, 3, 4]) + + #TODO: example with C or F order """ # extract the internal tensor in case of a heat tensor if isinstance(obj, dndarray.DNDarray): @@ -210,7 +233,9 @@ def array(obj, dtype=None, copy=True, ndmin=0, split=None, is_split=None, device # initialize the array if bool(copy): if isinstance(obj, torch.Tensor): + print("BEFORE CLONE.DETACH: ", obj.storage()) obj = obj.clone().detach() + print("AFTER CLONE.DETACH: ", obj.storage()) else: try: obj = torch.tensor(obj, dtype=dtype.torch_type() if dtype is not None else None) @@ -246,6 +271,22 @@ def array(obj, dtype=None, copy=True, ndmin=0, split=None, is_split=None, device lshape = np.array(obj.shape) gshape = lshape.copy() + # assign memory layout before splitting + print("ORIGINAL LAYOUT: ", obj, obj.storage(), obj.stride()) + if order == "F" and obj.stride()[0] != 1: + print("CALCULATING NEW_STRIDE") + # column-major memory layout + new_stride = (1,) + tuple( + np.prod(gshape[-len(gshape) : -len(gshape) + i]) for i in range(1, len(gshape)) + ) + dims = list(range(obj.ndim)) + dims[0], dims[-1] = dims[-1], dims[0] + permutation = tuple(dims) + obj = obj.permute(permutation).contiguous() + obj = obj.set_(obj.storage(), obj.storage_offset(), tuple(gshape), new_stride) + + print("MODIFIED LAYOUT: ", obj, obj.storage(), obj.stride()) + # content shall be split, chunk the passed data object up if split is not None: _, _, slices = comm.chunk(obj.shape, split) From b01511b7bf82a6bb28bb9a98d934fe1b9f80b176 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Sat, 23 Nov 2019 08:28:46 +0100 Subject: [PATCH 04/40] Implemented stride_tricks.sanitize_memory_layout, first pass. --- heat/core/stride_tricks.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/heat/core/stride_tricks.py b/heat/core/stride_tricks.py index f566ae0d25..0133417ced 100644 --- a/heat/core/stride_tricks.py +++ b/heat/core/stride_tricks.py @@ -111,6 +111,40 @@ def sanitize_axis(shape, axis): return axis +def sanitize_memory_layout(x, order="K"): + """ + x: torch.tensor + order: str, optional. can be "K"?, "A"?, "C", "F". 
#TODO NotImplementedError for "A" + + """ + dims = list(range(x.ndim)) + shape = x.shape + #assess current layout + row_major = bool(x.stride()[i] > x.stride()[i + 1] for i in dims) + if not row_major: + column_major = bool(x.stride()[i] < x.stride()[i + 1] for i in dims) + if not column_major: + #TODO: NotImplementedError, only row-major or column-major memory layout allowed for now + pass + if order == "K": + # TODO: return NotImplementedError, usage of clone() means losing original layout for now + pass + if (order == "F" and not column_major) or (order == "C" and not row-major): + if order == "F": + new_stride = (1,) + tuple( + np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) + ) + if order == "C": + new_stride = tuple( + np.prod(shape[i+1:]) for i in dims[:-1] + ) + (1,) + dims[0], dims[-1] = dims[-1], dims[0] + permutation = tuple(dims) + x = x.permute(permutation).contiguous() + x = x.set_(x.storage(), x.storage_offset(), shape, new_stride) + return x + + def sanitize_shape(shape): """ Verifies and normalizes the given shape. From d01d14d5d4d1e10ec7b1ba3fe95fb4757f78f433 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 25 Nov 2019 10:39:25 +0100 Subject: [PATCH 05/40] Property DNDarray.strides now gets the correct information from the underlying torch tensor and no longer gets the wrong info from from the numpyfied DNDarray --- heat/core/dndarray.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/heat/core/dndarray.py b/heat/core/dndarray.py index fde1bac90c..e819c5ecdb 100644 --- a/heat/core/dndarray.py +++ b/heat/core/dndarray.py @@ -184,7 +184,9 @@ def strides(self): tuple of ints: bytes to step in each dimension when traversing a tensor. numpy-like usage: self.strides """ - return self.numpy().strides + stride = np.array(self._DNDarray__array.stride()) + itemsize = self._DNDarray__array.storage().element_size() + return tuple(stride*itemsize) @property def T(self, axes=None): From 5b6276d392404fa8174dd3a5770901cb259952e1 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 25 Nov 2019 10:43:24 +0100 Subject: [PATCH 06/40] Moved sanitize_memory_layout from stride_tricks to module memory. --- heat/core/stride_tricks.py | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/heat/core/stride_tricks.py b/heat/core/stride_tricks.py index 0133417ced..f566ae0d25 100644 --- a/heat/core/stride_tricks.py +++ b/heat/core/stride_tricks.py @@ -111,40 +111,6 @@ def sanitize_axis(shape, axis): return axis -def sanitize_memory_layout(x, order="K"): - """ - x: torch.tensor - order: str, optional. can be "K"?, "A"?, "C", "F". 
#TODO NotImplementedError for "A" - - """ - dims = list(range(x.ndim)) - shape = x.shape - #assess current layout - row_major = bool(x.stride()[i] > x.stride()[i + 1] for i in dims) - if not row_major: - column_major = bool(x.stride()[i] < x.stride()[i + 1] for i in dims) - if not column_major: - #TODO: NotImplementedError, only row-major or column-major memory layout allowed for now - pass - if order == "K": - # TODO: return NotImplementedError, usage of clone() means losing original layout for now - pass - if (order == "F" and not column_major) or (order == "C" and not row-major): - if order == "F": - new_stride = (1,) + tuple( - np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) - ) - if order == "C": - new_stride = tuple( - np.prod(shape[i+1:]) for i in dims[:-1] - ) + (1,) - dims[0], dims[-1] = dims[-1], dims[0] - permutation = tuple(dims) - x = x.permute(permutation).contiguous() - x = x.set_(x.storage(), x.storage_offset(), shape, new_stride) - return x - - def sanitize_shape(shape): """ Verifies and normalizes the given shape. From 2d0596d24f8cc8c9bba7a505da513d5e64adab1d Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 25 Nov 2019 10:44:35 +0100 Subject: [PATCH 07/40] Implemented memory.sanitize_memory_layout. --- heat/core/memory.py | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index acd1827b28..3c4689a4c9 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -1,6 +1,7 @@ +import numpy as np from . import dndarray -__all__ = ["copy"] +__all__ = ["copy", "sanitize_memory_layout"] def copy(a): @@ -22,3 +23,33 @@ def copy(a): return dndarray.DNDarray( a._DNDarray__array.clone(), a.shape, a.dtype, a.split, a.device, a.comm ) + + +def sanitize_memory_layout(x, order="C"): + """ + x: torch.tensor + order: str, optional. can be "K"?, "A"?, "C", "F". #TODO NotImplementedError for "A" + + """ + dims = list(range(x.ndim)) + shape = x.shape + # assess current layout + row_major = bool(x.stride()[i] > x.stride()[i + 1] for i in dims) + column_major = False if row_major else bool(x.stride()[i] < x.stride()[i + 1] for i in dims) + if not row_major and not column_major: + raise NotImplementedError( + "Expecting row/major or column-major memory layout, not implemented for alternative layouts." + ) + if order == "K": + raise NotImplementedError( + "Internal usage of torch.clone() means losing original memory layout for now. \n Please specify order='C' for row-major, order='F' for column-major layout." + ) + if order == "C" and not row_major: + new_stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) + elif order == "F" and not column_major: + new_stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) + dims[0], dims[-1] = dims[-1], dims[0] + permutation = tuple(dims) + x = x.permute(permutation).contiguous() + x = x.set_(x.storage(), x.storage_offset(), shape, new_stride) + return x From 5ace9e1de0203aec2e7b3fcb2f7a58ca9e6d72b7 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 25 Nov 2019 10:45:37 +0100 Subject: [PATCH 08/40] Introduced attribute order in factories.array, enables specification of memory layout (row- or column-major). 
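
The keyword is threaded through factories.array(): the local torch tensor is passed to
memory.sanitize_memory_layout() after chunking (split case), at the start of the is_split
branch, or directly when the array is not distributed. For illustration, a minimal
single-process sketch, assuming a 2x3 int64 numpy input (8-byte items):

    >>> import numpy as np
    >>> import heat as ht
    >>> a = np.arange(2 * 3).reshape(2, 3)
    >>> ht.array(a).strides               # default 'C': row-major
    (24, 8)
    >>> ht.array(a, order="F").strides    # 'F': column-major
    (8, 16)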
--- heat/core/factories.py | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/heat/core/factories.py b/heat/core/factories.py index c3744d3119..de59afdda6 100644 --- a/heat/core/factories.py +++ b/heat/core/factories.py @@ -5,6 +5,7 @@ from .stride_tricks import sanitize_axis, sanitize_shape from . import devices from . import dndarray +from . import memory from . import types __all__ = [ @@ -233,9 +234,9 @@ def array( # initialize the array if bool(copy): if isinstance(obj, torch.Tensor): - print("BEFORE CLONE.DETACH: ", obj.storage()) + # TODO: watch out. At the moment clone() implies losing the underlying memory layout. + # pytorch fix in progress obj = obj.clone().detach() - print("AFTER CLONE.DETACH: ", obj.storage()) else: try: obj = torch.tensor(obj, dtype=dtype.torch_type() if dtype is not None else None) @@ -271,28 +272,30 @@ def array( lshape = np.array(obj.shape) gshape = lshape.copy() - # assign memory layout before splitting - print("ORIGINAL LAYOUT: ", obj, obj.storage(), obj.stride()) - if order == "F" and obj.stride()[0] != 1: - print("CALCULATING NEW_STRIDE") - # column-major memory layout - new_stride = (1,) + tuple( - np.prod(gshape[-len(gshape) : -len(gshape) + i]) for i in range(1, len(gshape)) - ) - dims = list(range(obj.ndim)) - dims[0], dims[-1] = dims[-1], dims[0] - permutation = tuple(dims) - obj = obj.permute(permutation).contiguous() - obj = obj.set_(obj.storage(), obj.storage_offset(), tuple(gshape), new_stride) - - print("MODIFIED LAYOUT: ", obj, obj.storage(), obj.stride()) + # # assign memory layout before splitting + # print("ORIGINAL LAYOUT: ", obj, obj.storage(), obj.stride()) + # if order == "F" and obj.stride()[0] != 1: + # print("CALCULATING NEW_STRIDE") + # # column-major memory layout + # new_stride = (1,) + tuple( + # np.prod(gshape[-len(gshape) : -len(gshape) + i]) for i in range(1, len(gshape)) + # ) + # dims = list(range(obj.ndim)) + # dims[0], dims[-1] = dims[-1], dims[0] + # permutation = tuple(dims) + # obj = obj.permute(permutation).contiguous() + # obj = obj.set_(obj.storage(), obj.storage_offset(), tuple(gshape), new_stride) + + # print("MODIFIED LAYOUT: ", obj, obj.storage(), obj.stride()) # content shall be split, chunk the passed data object up if split is not None: _, _, slices = comm.chunk(obj.shape, split) obj = obj[slices].clone() + obj = memory.sanitize_memory_layout(obj, order=order) # check with the neighboring rank whether the local shape would fit into a global shape elif is_split is not None: + obj = memory.sanitize_memory_layout(obj, order=order) if comm.rank < comm.size - 1: comm.Isend(lshape, dest=comm.rank + 1) if comm.rank != 0: @@ -325,6 +328,8 @@ def array( comm.Allreduce(MPI.IN_PLACE, ttl_shape, MPI.SUM) gshape[is_split] = ttl_shape[is_split] split = is_split + elif solit is None and is_split is None: + obj = memory.sanitize_memory_layout(obj, order=order) return dndarray.DNDarray(obj, tuple(int(ele) for ele in gshape), dtype, split, device, comm) From 89caeaa7268b8b3a79b7f52852fb08d85f378fe6 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 25 Nov 2019 11:32:03 +0100 Subject: [PATCH 09/40] sanitize_memory_layout(), moved tests for least likely occurrencies to the bottom --- heat/core/memory.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index 3c4689a4c9..b1c35b4977 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -33,9 +33,17 @@ def sanitize_memory_layout(x, 
order="C"): """ dims = list(range(x.ndim)) shape = x.shape - # assess current layout row_major = bool(x.stride()[i] > x.stride()[i + 1] for i in dims) column_major = False if row_major else bool(x.stride()[i] < x.stride()[i + 1] for i in dims) + if (order == "C" and column_major) or (order == "F" and row_major): + if column_major: + new_stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) + elif row_major: + new_stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) + dims[0], dims[-1] = dims[-1], dims[0] + permutation = tuple(dims) + x = x.permute(permutation).contiguous() + x = x.set_(x.storage(), x.storage_offset(), shape, new_stride) if not row_major and not column_major: raise NotImplementedError( "Expecting row/major or column-major memory layout, not implemented for alternative layouts." @@ -44,12 +52,4 @@ def sanitize_memory_layout(x, order="C"): raise NotImplementedError( "Internal usage of torch.clone() means losing original memory layout for now. \n Please specify order='C' for row-major, order='F' for column-major layout." ) - if order == "C" and not row_major: - new_stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) - elif order == "F" and not column_major: - new_stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) - dims[0], dims[-1] = dims[-1], dims[0] - permutation = tuple(dims) - x = x.permute(permutation).contiguous() - x = x.set_(x.storage(), x.storage_offset(), shape, new_stride) return x From bfaec9cd055fe5c2dc2129e8d0734649cfd830b2 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 25 Nov 2019 11:33:00 +0100 Subject: [PATCH 10/40] Fixed typos. --- heat/core/factories.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heat/core/factories.py b/heat/core/factories.py index de59afdda6..7cf8113b2c 100644 --- a/heat/core/factories.py +++ b/heat/core/factories.py @@ -328,7 +328,7 @@ def array( comm.Allreduce(MPI.IN_PLACE, ttl_shape, MPI.SUM) gshape[is_split] = ttl_shape[is_split] split = is_split - elif solit is None and is_split is None: + elif split is None and is_split is None: obj = memory.sanitize_memory_layout(obj, order=order) return dndarray.DNDarray(obj, tuple(int(ele) for ele in gshape), dtype, split, device, comm) From a60482f0e1c0efd21a5ec6b4eac7bd00c422bf69 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Wed, 27 Nov 2019 07:58:30 +0100 Subject: [PATCH 11/40] ht.array, docs and examples for attribute "order" defining memory layout. --- heat/core/factories.py | 95 ++++++++++++++++++++++++++++++------------ 1 file changed, 68 insertions(+), 27 deletions(-) diff --git a/heat/core/factories.py b/heat/core/factories.py index 7cf8113b2c..3fb32f3047 100644 --- a/heat/core/factories.py +++ b/heat/core/factories.py @@ -161,16 +161,10 @@ def array( Specifies the minimum number of dimensions that the resulting array should have. Ones will, if needed, be attached to the shape if ndim>0 and prefaced in case of ndim<0 to meet the requirement. order: str, optional - #TODO: make sure all options are covered - Specify the memory layout of the array. If object is not an array, the newly created array will be in C order (row major) unless ‘F’ is specified, in which case it will be in Fortran order (column major). If object is an array the following holds. 
- order no copy copy=True - #TODO‘K’ unchanged F & C order preserved, otherwise most similar order - #TODO‘A’ unchanged F order if input is F and not C, otherwise C order - #TODO‘C’ C order C order - #TODO‘F’ F order F order - - #TODO: When copy=False and a copy is made for other reasons, the result is the same as if copy=True, with some exceptions for A, see the Notes section. The default order is ‘K’. - + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. + #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. split : None or int, optional The axis along which the passed array content obj is split and distributed in memory. Mutually exclusive with is_split. @@ -221,7 +215,70 @@ def array( (0/2) tensor([1, 2, 3, 4]) (1/2) tensor([1, 2, 3, 4]) - #TODO: example with C or F order + Memory layout, single-node: + >>> a = np.arange(2 * 3).reshape(2, 3) + >>> a + array([[ 0, 1, 2], + [ 3, 4, 5]]) + >>> a.strides + (24, 8) + >>> b = ht.array(a) + >>> b + tensor([[0, 1, 2], + [3, 4, 5]]) + >>> b.strides + (24, 8) + >>> b._DNDarray__array.storage() #TODO: implement ht.view() + 0 + 1 + 2 + 3 + 4 + 5 + [torch.LongStorage of size 6] + >>> c = ht.array(a, order='F') + >>> c + tensor([[0, 1, 2], + [3, 4, 5]]) + >>> c.strides + (8, 16) + >>> c._DNDarray__array.storage() #TODO: implement ht.view() + 0 + 3 + 1 + 4 + 2 + 5 + [torch.LongStorage of size 6] + + Memory layout, distributed: + >>> a = np.arange(4 * 3).reshape(4, 3) + >>> a.strides + (24, 8) + >>> b = ht.array(a, order='F') + >>> b + (0/2) tensor([[0, 1, 2], + [3, 4, 5]]) + (1/2) tensor([[ 6, 7, 8], + [ 9, 10, 11]]) + >>> b.strides + (0/2) (8, 16) + (1/2) (8, 16) + >>> b._DNDarray__array.storage() #TODO: implement ht.view() + (0/2) 0 + 3 + 1 + 4 + 2 + 5 + [torch.LongStorage of size 6] + (1/2) 6 + 9 + 7 + 10 + 8 + 11 + [torch.LongStorage of size 6] """ # extract the internal tensor in case of a heat tensor if isinstance(obj, dndarray.DNDarray): @@ -272,22 +329,6 @@ def array( lshape = np.array(obj.shape) gshape = lshape.copy() - # # assign memory layout before splitting - # print("ORIGINAL LAYOUT: ", obj, obj.storage(), obj.stride()) - # if order == "F" and obj.stride()[0] != 1: - # print("CALCULATING NEW_STRIDE") - # # column-major memory layout - # new_stride = (1,) + tuple( - # np.prod(gshape[-len(gshape) : -len(gshape) + i]) for i in range(1, len(gshape)) - # ) - # dims = list(range(obj.ndim)) - # dims[0], dims[-1] = dims[-1], dims[0] - # permutation = tuple(dims) - # obj = obj.permute(permutation).contiguous() - # obj = obj.set_(obj.storage(), obj.storage_offset(), tuple(gshape), new_stride) - - # print("MODIFIED LAYOUT: ", obj, obj.storage(), obj.stride()) - # content shall be split, chunk the passed data object up if split is not None: _, _, slices = comm.chunk(obj.shape, split) From dba61a2c55452578d7665a7d49a41a470c874ff3 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Wed, 27 Nov 2019 13:06:22 +0100 Subject: [PATCH 12/40] Expanded documentation to sanitize_memory_layout(). 
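
The docstring now states that 'C' stores the array rows first (row-major) while 'F' stores
it columns first (column-major). As a worked illustration of the element strides these
layouts imply for a contiguous tensor of shape (4, 3, 5) (example only, not part of the
change itself):

    import numpy as np

    shape = (4, 3, 5)
    # order='C' (row-major): stride[i] = prod(shape[i+1:])  ->  (15, 5, 1)
    c_stride = tuple(int(np.prod(shape[i + 1:])) for i in range(len(shape)))
    # order='F' (column-major): stride[i] = prod(shape[:i]) ->  (1, 4, 12)
    f_stride = tuple(int(np.prod(shape[:i])) for i in range(len(shape)))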
--- heat/core/memory.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index b1c35b4977..8a2f25514f 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -27,9 +27,17 @@ def copy(a): def sanitize_memory_layout(x, order="C"): """ + Return the given object with memory layout as defined below. + + Parameters + ----------- + x: torch.tensor - order: str, optional. can be "K"?, "A"?, "C", "F". #TODO NotImplementedError for "A" - + Input data + + order: str, optional. + Default is 'C' as in C-like (row-major) memory layout. The array is stored in memory rows first. + Alternative is 'F', as in Fortran-like (column-major) memory layout. The array is stored columns first. """ dims = list(range(x.ndim)) shape = x.shape From 90b6e1618143912084296f3bb56d7182e9447fad Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Wed, 27 Nov 2019 14:46:00 +0100 Subject: [PATCH 13/40] Keyword argument order introduced for all factories (except those creating 1D list). --- heat/core/factories.py | 77 ++++++++++++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 21 deletions(-) diff --git a/heat/core/factories.py b/heat/core/factories.py index 3fb32f3047..2c272d05d7 100644 --- a/heat/core/factories.py +++ b/heat/core/factories.py @@ -128,6 +128,7 @@ def arange(*args, dtype=None, split=None, device=None, comm=None): htype = types.canonical_heat_type(dtype) data = data.type(htype.torch_type()) + data = memory.sanitize_memory_layout(data, order=order) return dndarray.DNDarray(data, gshape, htype, split, device, comm) @@ -375,7 +376,7 @@ def array( return dndarray.DNDarray(obj, tuple(int(ele) for ele in gshape), dtype, split, device, comm) -def empty(shape, dtype=types.float32, split=None, device=None, comm=None): +def empty(shape, dtype=types.float32, split=None, device=None, comm=None, order="C"): """ Returns a new uninitialized array of given shape and data type. May be allocated split up across multiple nodes along the specified axis. @@ -392,6 +393,11 @@ def empty(shape, dtype=types.float32, split=None, device=None, comm=None): Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device). comm: Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. + order: str, optional + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. + #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. Returns ------- @@ -410,10 +416,10 @@ def empty(shape, dtype=types.float32, split=None, device=None, comm=None): tensor([[ 0.0000e+00, -2.0000e+00, 3.3113e+35], [ 3.6902e+19, 1.2096e+04, 7.1846e+22]]) """ - return __factory(shape, dtype, split, torch.empty, device, comm) + return __factory(shape, dtype, split, torch.empty, device, comm, order) -def empty_like(a, dtype=None, split=None, device=None, comm=None): +def empty_like(a, dtype=None, split=None, device=None, comm=None, order="C"): """ Returns a new uninitialized array with the same type, shape and data distribution of given object. Data type and data distribution strategy can be explicitly overriden. 
@@ -448,10 +454,10 @@ def empty_like(a, dtype=None, split=None, device=None, comm=None): tensor([[ 0.0000e+00, -2.0000e+00, 3.3113e+35], [ 3.6902e+19, 1.2096e+04, 7.1846e+22]]) """ - return __factory_like(a, dtype, split, empty, device, comm) + return __factory_like(a, dtype, split, empty, device, comm, order=order) -def eye(shape, dtype=types.float32, split=None, device=None, comm=None): +def eye(shape, dtype=types.float32, split=None, device=None, comm=None, order="C"): """ Returns a new 2-D tensor with ones on the diagonal and zeroes elsewhere. @@ -468,6 +474,11 @@ def eye(shape, dtype=types.float32, split=None, device=None, comm=None): Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device). comm : Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. + order: str, optional + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. + #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. Returns ------- @@ -508,12 +519,13 @@ def eye(shape, dtype=types.float32, split=None, device=None, comm=None): pos_y = i if split == 1 else i + offset data[pos_x][pos_y] = 1 + data = memory.sanitize_memory_layout(data, order=order) return dndarray.DNDarray( data, gshape, types.canonical_heat_type(data.dtype), split, device, comm ) -def __factory(shape, dtype, split, local_factory, device, comm): +def __factory(shape, dtype, split, local_factory, device, comm, order): """ Abstracted factory function for HeAT tensor initialization. @@ -548,11 +560,11 @@ def __factory(shape, dtype, split, local_factory, device, comm): _, local_shape, _ = comm.chunk(shape, split) # create the torch data using the factory function data = local_factory(local_shape, dtype=dtype.torch_type(), device=device.torch_device) - + data = memory.sanitize_memory_layout(data, order=order) return dndarray.DNDarray(data, shape, dtype, split, device, comm) -def __factory_like(a, dtype, split, factory, device, comm, **kwargs): +def __factory_like(a, dtype, split, factory, device, comm, order="C", **kwargs): """ Abstracted '...-like' factory function for HeAT tensor initialization @@ -570,6 +582,12 @@ def __factory_like(a, dtype, split, factory, device, comm, **kwargs): Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device). comm: Communication Handle to the nodes holding distributed parts or copies of this tensor. + order: str, optional + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. + #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. 
+ Returns ------- @@ -604,10 +622,10 @@ def __factory_like(a, dtype, split, factory, device, comm, **kwargs): # use the default communicator, if not set comm = sanitize_comm(comm) - return factory(shape, dtype=dtype, split=split, device=device, comm=comm, **kwargs) + return factory(shape, dtype=dtype, split=split, device=device, comm=comm, order=order, **kwargs) -def full(shape, fill_value, dtype=types.float32, split=None, device=None, comm=None): +def full(shape, fill_value, dtype=types.float32, split=None, device=None, comm=None, order="C"): """ Return a new array of given shape and type, filled with fill_value. @@ -644,10 +662,10 @@ def full(shape, fill_value, dtype=types.float32, split=None, device=None, comm=N def local_factory(*args, **kwargs): return torch.full(*args, fill_value=fill_value, **kwargs) - return __factory(shape, dtype, split, local_factory, device, comm) + return __factory(shape, dtype, split, local_factory, device, comm, order=order) -def full_like(a, fill_value, dtype=types.float32, split=None, device=None, comm=None): +def full_like(a, fill_value, dtype=types.float32, split=None, device=None, comm=None, order="C"): """ Return a full array with the same shape and type as a given array. @@ -682,7 +700,7 @@ def full_like(a, fill_value, dtype=types.float32, split=None, device=None, comm= tensor([[1., 1., 1.], [1., 1., 1.]]) """ - return __factory_like(a, dtype, split, full, device, comm, fill_value=fill_value) + return __factory_like(a, dtype, split, full, device, comm, fill_value=fill_value, order=order) def linspace( @@ -842,7 +860,7 @@ def logspace( return pow(base, y).astype(dtype, copy=False) -def ones(shape, dtype=types.float32, split=None, device=None, comm=None): +def ones(shape, dtype=types.float32, split=None, device=None, comm=None, order="C"): """ Returns a new array of given shape and data type filled with one values. May be allocated split up across multiple nodes along the specified axis. @@ -859,6 +877,12 @@ def ones(shape, dtype=types.float32, split=None, device=None, comm=None): Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device). comm : Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. + order: str, optional + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. + #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. + Returns ------- @@ -877,10 +901,10 @@ def ones(shape, dtype=types.float32, split=None, device=None, comm=None): tensor([[1., 1., 1.], [1., 1., 1.]]) """ - return __factory(shape, dtype, split, torch.ones, device, comm) + return __factory(shape, dtype, split, torch.ones, device, comm, order) -def ones_like(a, dtype=None, split=None, device=None, comm=None): +def ones_like(a, dtype=None, split=None, device=None, comm=None, order="C"): """ Returns a new array filled with ones with the same type, shape and data distribution of given object. Data type and data distribution strategy can be explicitly overriden. 
@@ -914,10 +938,10 @@ def ones_like(a, dtype=None, split=None, device=None, comm=None): tensor([[1., 1., 1.], [1., 1., 1.]]) """ - return __factory_like(a, dtype, split, ones, device, comm) + return __factory_like(a, dtype, split, ones, device, comm, order=order) -def zeros(shape, dtype=types.float32, split=None, device=None, comm=None): +def zeros(shape, dtype=types.float32, split=None, device=None, comm=None, order="C"): """ Returns a new array of given shape and data type filled with zero values. May be allocated split up across multiple nodes along the specified axis. @@ -934,6 +958,12 @@ def zeros(shape, dtype=types.float32, split=None, device=None, comm=None): Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device). comm: Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. + order: str, optional + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. + #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. + Returns ------- @@ -952,10 +982,10 @@ def zeros(shape, dtype=types.float32, split=None, device=None, comm=None): tensor([[0., 0., 0.], [0., 0., 0.]]) """ - return __factory(shape, dtype, split, torch.zeros, device, comm) + return __factory(shape, dtype, split, torch.zeros, device, comm, order) -def zeros_like(a, dtype=None, split=None, device=None, comm=None): +def zeros_like(a, dtype=None, split=None, device=None, comm=None, order="C"): """ Returns a new array filled with zeros with the same type, shape and data distribution of given object. Data type and data distribution strategy can be explicitly overriden. @@ -972,6 +1002,11 @@ def zeros_like(a, dtype=None, split=None, device=None, comm=None): Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device). comm: Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. + order: str, optional + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. + #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. Returns ------- @@ -989,4 +1024,4 @@ def zeros_like(a, dtype=None, split=None, device=None, comm=None): tensor([[0., 0., 0.], [0., 0., 0.]]) """ - return __factory_like(a, dtype, split, zeros, device, comm) + return __factory_like(a, dtype, split, zeros, device, comm, order=order) From 12934d107bcc1d3da41accd8f34ce93eb1e149e7 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Thu, 28 Nov 2019 09:36:41 +0100 Subject: [PATCH 14/40] Typo. 
--- heat/core/memory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index 8a2f25514f..5664032a49 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -54,7 +54,7 @@ def sanitize_memory_layout(x, order="C"): x = x.set_(x.storage(), x.storage_offset(), shape, new_stride) if not row_major and not column_major: raise NotImplementedError( - "Expecting row/major or column-major memory layout, not implemented for alternative layouts." + "Expecting row-major or column-major memory layout, not implemented for alternative layouts." ) if order == "K": raise NotImplementedError( From c442fa84e1b6f78625d0a3fc55d88e2474e0fecf Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Thu, 28 Nov 2019 14:35:27 +0100 Subject: [PATCH 15/40] Implemented function assertTrue_memory_layout() --- heat/core/tests/test_suites/basic_test.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/heat/core/tests/test_suites/basic_test.py b/heat/core/tests/test_suites/basic_test.py index 5a6f19a241..f6822bcc92 100644 --- a/heat/core/tests/test_suites/basic_test.py +++ b/heat/core/tests/test_suites/basic_test.py @@ -209,6 +209,25 @@ def assert_func_equal( else: self.assertTrue(np.array_equal(ht_res._DNDarray__array.numpy(), np_res)) + def assertTrue_memory_layout(self, tensor, order): + """ + Checks that the memory layout of a given heat tensor is as specified by argument order. + + Parameters: + ----------- + order: str, 'C' for C-like (row-major), 'F' for Fortran-like (column-major) memory layout. + """ + dims = list(range(tensor._DNDarray__array.ndim)) + stride = tensor._DNDarray__array.stride() + row_major = all(stride[i] > stride[i + 1] for i in dims[:-1]) + column_major = all(stride[i] < stride[i + 1] for i in dims[:-1]) + if order == "C": + return self.assertTrue(row_major) + elif order == "F": + return self.assertTrue(column_major) + else: + raise ValueError("expected order to be 'C' or 'F', but was {}".format(order)) + def _create_random_array(self, shape): seed = np.random.randint(1000000, size=(1,)) self.comm.Bcast(seed, root=0) From 1da8a74e801793a32ff3c5bcf7bb2ae7046f6c5c Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Thu, 28 Nov 2019 14:36:53 +0100 Subject: [PATCH 16/40] Modified sanitize_memory_layout() to address tensors with unknown/wrong layout (e.g. from tensor creation with ndmin > tensor.shape). --- heat/core/memory.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index 5664032a49..ae697523d6 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -39,23 +39,24 @@ def sanitize_memory_layout(x, order="C"): Default is 'C' as in C-like (row-major) memory layout. The array is stored in memory rows first. Alternative is 'F', as in Fortran-like (column-major) memory layout. The array is stored columns first. 
""" + if x.ndim < 2: + return x dims = list(range(x.ndim)) shape = x.shape - row_major = bool(x.stride()[i] > x.stride()[i + 1] for i in dims) - column_major = False if row_major else bool(x.stride()[i] < x.stride()[i + 1] for i in dims) + stride = x.stride() + row_major = all(np.diff(list(stride)) < 0) + column_major = all(np.diff(list(stride)) > 0) + if order == "C": + if not row_major: + stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) + if order == "F": + if not column_major: + stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) if (order == "C" and column_major) or (order == "F" and row_major): - if column_major: - new_stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) - elif row_major: - new_stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) dims[0], dims[-1] = dims[-1], dims[0] - permutation = tuple(dims) - x = x.permute(permutation).contiguous() - x = x.set_(x.storage(), x.storage_offset(), shape, new_stride) - if not row_major and not column_major: - raise NotImplementedError( - "Expecting row-major or column-major memory layout, not implemented for alternative layouts." - ) + permutation = tuple(dims) + x = x.permute(permutation).contiguous() + x = x.set_(x.storage(), x.storage_offset(), shape, stride) if order == "K": raise NotImplementedError( "Internal usage of torch.clone() means losing original memory layout for now. \n Please specify order='C' for row-major, order='F' for column-major layout." From bb9a2b6ab02cbc9406f4d18620187a8c551e7833 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 09:56:53 +0100 Subject: [PATCH 17/40] sanitize_memory_layout(), removed unnecessary checks (ndim < 2 and strides already matching requested order), removed "NotImplementedError" for order K. --- heat/core/memory.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index ae697523d6..ee2d94ca77 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -40,18 +40,20 @@ def sanitize_memory_layout(x, order="C"): Alternative is 'F', as in Fortran-like (column-major) memory layout. The array is stored columns first. """ if x.ndim < 2: + # do nothing return x dims = list(range(x.ndim)) shape = x.shape stride = x.stride() row_major = all(np.diff(list(stride)) < 0) column_major = all(np.diff(list(stride)) > 0) + if (order == "C" and row_major) or (order == "F" and column_major): + # do nothing + return x if order == "C": - if not row_major: - stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) + stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) if order == "F": - if not column_major: - stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) + stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) if (order == "C" and column_major) or (order == "F" and row_major): dims[0], dims[-1] = dims[-1], dims[0] permutation = tuple(dims) From c62974b5e593a91a35ad21f4fe6349e8aa4463a4 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 10:14:55 +0100 Subject: [PATCH 18/40] Added keyword argument order in factories calls. 
--- heat/core/factories.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/heat/core/factories.py b/heat/core/factories.py index 2c272d05d7..858f4b27c8 100644 --- a/heat/core/factories.py +++ b/heat/core/factories.py @@ -128,7 +128,6 @@ def arange(*args, dtype=None, split=None, device=None, comm=None): htype = types.canonical_heat_type(dtype) data = data.type(htype.torch_type()) - data = memory.sanitize_memory_layout(data, order=order) return dndarray.DNDarray(data, gshape, htype, split, device, comm) @@ -982,7 +981,7 @@ def zeros(shape, dtype=types.float32, split=None, device=None, comm=None, order= tensor([[0., 0., 0.], [0., 0., 0.]]) """ - return __factory(shape, dtype, split, torch.zeros, device, comm, order) + return __factory(shape, dtype, split, torch.zeros, device, comm, order=order) def zeros_like(a, dtype=None, split=None, device=None, comm=None, order="C"): From e18eee6ed49fcf8325cecf33e8927d786b65333f Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 10:16:45 +0100 Subject: [PATCH 19/40] First draft of function test_sanitize_memory_layout() --- heat/core/tests/test_memory.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index b680311d6a..a827086de4 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -1,7 +1,9 @@ import unittest - +import torch import heat as ht +from heat.core.tests.test_suites.basic_test import BasicTest + class TestMemory(unittest.TestCase): def test_copy(self): @@ -16,3 +18,19 @@ def test_copy(self): # test exceptions with self.assertRaises(TypeError): ht.copy("hello world") + + def test_sanitize_memory_layout(self): + # non distributed, 2D + a_torch = torch.arange(12).reshape(4, 3) + a_heat_C = ht.array(a_torch) + a_heat_F = ht.array(a_torch, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_C, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_F, "F") + # non distributed, 4D + # non distributed, after reduction operation + # distributed, split, 2D + # distributed, split, 4D + # distributed, is_split, 2D + # distributed, is_split, 4D + # distributed, after reduction operation + From c725a8b13a9f1ccec308cea530fab8799897af34 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 10:38:55 +0100 Subject: [PATCH 20/40] Modified row_major, column_major condition to allow for shape=1 along a given dimension --- heat/core/memory.py | 4 ++-- heat/core/tests/test_suites/basic_test.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index ee2d94ca77..2bd2124ccb 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -45,8 +45,8 @@ def sanitize_memory_layout(x, order="C"): dims = list(range(x.ndim)) shape = x.shape stride = x.stride() - row_major = all(np.diff(list(stride)) < 0) - column_major = all(np.diff(list(stride)) > 0) + row_major = all(np.diff(list(stride)) <= 0) + column_major = all(np.diff(list(stride)) >= 0) if (order == "C" and row_major) or (order == "F" and column_major): # do nothing return x diff --git a/heat/core/tests/test_suites/basic_test.py b/heat/core/tests/test_suites/basic_test.py index f6822bcc92..61a9a6236a 100644 --- a/heat/core/tests/test_suites/basic_test.py +++ b/heat/core/tests/test_suites/basic_test.py @@ -219,8 +219,8 @@ def assertTrue_memory_layout(self, tensor, order): """ dims = list(range(tensor._DNDarray__array.ndim)) stride = tensor._DNDarray__array.stride() - 
row_major = all(stride[i] > stride[i + 1] for i in dims[:-1]) - column_major = all(stride[i] < stride[i + 1] for i in dims[:-1]) + row_major = all(np.diff(list(stride)) <= 0) + column_major = all(np.diff(list(stride)) >= 0) if order == "C": return self.assertTrue(row_major) elif order == "F": From 76a1617285472772865b7dc57fa4650543e703ae Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 10:40:22 +0100 Subject: [PATCH 21/40] Added memory layout test for 5D tensor, non distributed, shape contains 1. --- heat/core/tests/test_memory.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index a827086de4..7aa828e3e4 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -26,7 +26,12 @@ def test_sanitize_memory_layout(self): a_heat_F = ht.array(a_torch, order="F") BasicTest.assertTrue_memory_layout(self, a_heat_C, "C") BasicTest.assertTrue_memory_layout(self, a_heat_F, "F") - # non distributed, 4D + # non distributed, 5D + a_torch_5d = torch.arange(4 * 3 * 5 * 2 * 1).reshape(4, 3, 1, 2, 5) + a_heat_5d_C = ht.array(a_torch_5d) + a_heat_5d_F = ht.array(a_torch_5d, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_5d_C, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_5d_F, "F") # non distributed, after reduction operation # distributed, split, 2D # distributed, split, 4D From 5628fac81e81c1d1d438517e640226f57fc1622d Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 10:52:00 +0100 Subject: [PATCH 22/40] Added memory layout test for non distributed tensor after reduction operation --- heat/core/tests/test_memory.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index 7aa828e3e4..5e109175c9 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -33,6 +33,15 @@ def test_sanitize_memory_layout(self): BasicTest.assertTrue_memory_layout(self, a_heat_5d_C, "C") BasicTest.assertTrue_memory_layout(self, a_heat_5d_F, "F") # non distributed, after reduction operation + a_heat_5d_C_reduce = a_heat_5d_C.sum(-1) + a_heat_5d_F_reduce = a_heat_5d_F.sum(-1) + BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_reduce, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_reduce, "F") + numpy_args = {"axis": -1} + heat_args = {"axis": -1} + BasicTest.assert_func_equal( + self, a_heat_F_reduce, ht.sum, np.sum, heat_args=heat_args, numpy_args=numpy_args + ) # distributed, split, 2D # distributed, split, 4D # distributed, is_split, 2D From 932a85e491b7e430f09a87107c5a1e3cf838b565 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 11:21:51 +0100 Subject: [PATCH 23/40] Removed test for non distributed tensor after reduction operation, not relevant for now --- heat/core/tests/test_memory.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index 5e109175c9..34038fc8cc 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -1,5 +1,6 @@ import unittest import torch +import numpy as np import heat as ht from heat.core.tests.test_suites.basic_test import BasicTest @@ -32,16 +33,6 @@ def test_sanitize_memory_layout(self): a_heat_5d_F = ht.array(a_torch_5d, order="F") BasicTest.assertTrue_memory_layout(self, a_heat_5d_C, "C") BasicTest.assertTrue_memory_layout(self, a_heat_5d_F, "F") - # non distributed, after 
reduction operation - a_heat_5d_C_reduce = a_heat_5d_C.sum(-1) - a_heat_5d_F_reduce = a_heat_5d_F.sum(-1) - BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_reduce, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_reduce, "F") - numpy_args = {"axis": -1} - heat_args = {"axis": -1} - BasicTest.assert_func_equal( - self, a_heat_F_reduce, ht.sum, np.sum, heat_args=heat_args, numpy_args=numpy_args - ) # distributed, split, 2D # distributed, split, 4D # distributed, is_split, 2D From c3ede7234dfc45ad9d4d03211d0ed272afbf3ea7 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 12:47:32 +0100 Subject: [PATCH 24/40] Implemented tests for sanitize_memory_layout() --- heat/core/tests/test_memory.py | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index 34038fc8cc..e9a6729530 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -34,8 +34,34 @@ def test_sanitize_memory_layout(self): BasicTest.assertTrue_memory_layout(self, a_heat_5d_C, "C") BasicTest.assertTrue_memory_layout(self, a_heat_5d_F, "F") # distributed, split, 2D - # distributed, split, 4D + size = BasicTest.get_size(a_heat_5d_C) + a_torch_2d = torch.arange(4 * size * 3 * size).reshape(4 * size, 3 * size) + a_heat_C_split = ht.array(a_torch_2d, split=0) + a_heat_F_split = ht.array(a_torch_2d, split=1, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_C_split, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_F_split, "F") + a_heat_F_split_sum = a_heat_F_split.sum(1) + a_torch_sum = a_torch_2d.sum(1) + BasicTest.assert_array_equal(self, a_heat_F_split_sum, a_torch_sum) + # distributed, split, 5D + a_torch_5d = torch.arange(4 * 3 * 5 * 2 * size * 1).reshape(4, 3, 1, 2 * size, 5) + a_heat_5d_C_split = ht.array(a_torch_5d, split=-2) + a_heat_5d_F_split = ht.array(a_torch_5d, split=-2, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_split, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_split, "F") + a_heat_5d_F_split_sum = a_heat_5d_F_split.sum(-2) + a_torch_5d_sum = a_torch_5d.sum(-2) + BasicTest.assert_array_equal(self, a_heat_F_split_sum, a_torch_sum) # distributed, is_split, 2D - # distributed, is_split, 4D - # distributed, after reduction operation - + a_heat_C_issplit = ht.array(a_torch_2d, is_split=0) + a_heat_F_issplit = ht.array(a_torch_2d, is_split=1, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_C_issplit, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_F_issplit, "F") + # distributed, is_split, 5D + a_heat_5d_C_issplit = ht.array(a_torch_5d, is_split=-2) + a_heat_5d_F_issplit = ht.array(a_torch_5d, is_split=-2, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_issplit, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_issplit, "F") + # test exceptions + with self.assertRaises(NotImplementedError): + ht.zeros_like(a_heat_5d_C_split, order="K") From 5beea49098ea32ee10500ba3bce240c4bfdfa431 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 29 Nov 2019 12:50:39 +0100 Subject: [PATCH 25/40] In assert_array_equal(), Allreduce running on self._comm, not on self.comm. 
--- heat/core/tests/test_suites/basic_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heat/core/tests/test_suites/basic_test.py b/heat/core/tests/test_suites/basic_test.py index 61a9a6236a..637bf4da81 100644 --- a/heat/core/tests/test_suites/basic_test.py +++ b/heat/core/tests/test_suites/basic_test.py @@ -101,7 +101,7 @@ def assert_array_equal(self, heat_array, expected_array): # Array is distributed correctly equal_res = np.array(compare_func(local_numpy, expected_array[slices])) - self.comm.Allreduce(MPI.IN_PLACE, equal_res, MPI.LAND) + self._comm.Allreduce(MPI.IN_PLACE, equal_res, MPI.LAND) self.assertTrue(equal_res, "Local tensors do not match the corresponding numpy slices.") self.assertEqual( local_numpy.dtype, From 43df1ab58888582364931519cd0b5eca87c97d6b Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 2 Dec 2019 10:12:55 +0100 Subject: [PATCH 26/40] Removing unused variable. --- heat/core/tests/test_suites/basic_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/heat/core/tests/test_suites/basic_test.py b/heat/core/tests/test_suites/basic_test.py index 637bf4da81..226770e942 100644 --- a/heat/core/tests/test_suites/basic_test.py +++ b/heat/core/tests/test_suites/basic_test.py @@ -217,7 +217,6 @@ def assertTrue_memory_layout(self, tensor, order): ----------- order: str, 'C' for C-like (row-major), 'F' for Fortran-like (column-major) memory layout. """ - dims = list(range(tensor._DNDarray__array.ndim)) stride = tensor._DNDarray__array.stride() row_major = all(np.diff(list(stride)) <= 0) column_major = all(np.diff(list(stride)) >= 0) From e47ac209670f2b0fdd03ba1b985f807c0107558c Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 2 Dec 2019 14:24:52 +0100 Subject: [PATCH 27/40] Fixed error that messed up column-major memory layout for ndim>3 --- heat/core/memory.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index 2bd2124ccb..d95e37f1dd 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -1,4 +1,5 @@ import numpy as np +import torch from . import dndarray __all__ = ["copy", "sanitize_memory_layout"] @@ -35,7 +36,7 @@ def sanitize_memory_layout(x, order="C"): x: torch.tensor Input data - order: str, optional. + order: str, optional. Default is 'C' as in C-like (row-major) memory layout. The array is stored in memory rows first. Alternative is 'F', as in Fortran-like (column-major) memory layout. The array is stored columns first. 
""" @@ -43,24 +44,24 @@ def sanitize_memory_layout(x, order="C"): # do nothing return x dims = list(range(x.ndim)) - shape = x.shape - stride = x.stride() - row_major = all(np.diff(list(stride)) <= 0) - column_major = all(np.diff(list(stride)) >= 0) + stride = list(x.stride()) + row_major = all(np.diff(stride) <= 0) + column_major = all(np.diff(stride) >= 0) if (order == "C" and row_major) or (order == "F" and column_major): # do nothing return x - if order == "C": - stride = tuple(np.prod(shape[i + 1 :]) for i in dims[:-1]) + (1,) - if order == "F": - stride = (1,) + tuple(np.prod(shape[-x.ndim : -x.ndim + i]) for i in dims[1:]) if (order == "C" and column_major) or (order == "F" and row_major): - dims[0], dims[-1] = dims[-1], dims[0] - permutation = tuple(dims) - x = x.permute(permutation).contiguous() - x = x.set_(x.storage(), x.storage_offset(), shape, stride) + dims = tuple(reversed(dims)) + y = torch.empty_like(x) + permutation = x.permute(dims).contiguous() + y = y.set_( + permutation.storage(), + x.storage_offset(), + x.shape, + tuple(reversed(permutation.stride())), + ) if order == "K": raise NotImplementedError( "Internal usage of torch.clone() means losing original memory layout for now. \n Please specify order='C' for row-major, order='F' for column-major layout." ) - return x + return y From 3a096ad640c15743db62b0ea2ce2c80fdebbfec4 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 2 Dec 2019 14:28:01 +0100 Subject: [PATCH 28/40] Fixed and expanded tests for sanitize_memory_layout --- heat/core/tests/test_memory.py | 35 +++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index e9a6729530..44ff72e35b 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -33,35 +33,44 @@ def test_sanitize_memory_layout(self): a_heat_5d_F = ht.array(a_torch_5d, order="F") BasicTest.assertTrue_memory_layout(self, a_heat_5d_C, "C") BasicTest.assertTrue_memory_layout(self, a_heat_5d_F, "F") + a_heat_5d_F_sum = a_heat_5d_F.sum(-2) + a_torch_5d_sum = a_torch_5d.sum(-2) + BasicTest.assert_array_equal(self, a_heat_5d_F_sum, a_torch_5d_sum) # distributed, split, 2D size = BasicTest.get_size(a_heat_5d_C) a_torch_2d = torch.arange(4 * size * 3 * size).reshape(4 * size, 3 * size) - a_heat_C_split = ht.array(a_torch_2d, split=0) - a_heat_F_split = ht.array(a_torch_2d, split=1, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_C_split, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_F_split, "F") - a_heat_F_split_sum = a_heat_F_split.sum(1) - a_torch_sum = a_torch_2d.sum(1) - BasicTest.assert_array_equal(self, a_heat_F_split_sum, a_torch_sum) + a_heat_2d_C_split = ht.array(a_torch_2d, split=0) + a_heat_2d_F_split = ht.array(a_torch_2d, split=1, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_2d_C_split, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_2d_F_split, "F") + a_heat_2d_F_split_sum = a_heat_2d_F_split.sum(1) + a_torch_2d_sum = a_torch_2d.sum(1) + BasicTest.assert_array_equal(self, a_heat_2d_F_split_sum, a_torch_2d_sum) # distributed, split, 5D - a_torch_5d = torch.arange(4 * 3 * 5 * 2 * size * 1).reshape(4, 3, 1, 2 * size, 5) + a_torch_5d = torch.arange(4 * 3 * 5 * 2 * size * 7).reshape(4, 3, 7, 2 * size, 5) a_heat_5d_C_split = ht.array(a_torch_5d, split=-2) a_heat_5d_F_split = ht.array(a_torch_5d, split=-2, order="F") BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_split, "C") 
BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_split, "F") a_heat_5d_F_split_sum = a_heat_5d_F_split.sum(-2) a_torch_5d_sum = a_torch_5d.sum(-2) - BasicTest.assert_array_equal(self, a_heat_F_split_sum, a_torch_sum) + BasicTest.assert_array_equal(self, a_heat_5d_F_split_sum, a_torch_5d_sum) # distributed, is_split, 2D - a_heat_C_issplit = ht.array(a_torch_2d, is_split=0) - a_heat_F_issplit = ht.array(a_torch_2d, is_split=1, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_C_issplit, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_F_issplit, "F") + a_heat_2d_C_issplit = ht.array(a_torch_2d, is_split=0) + a_heat_2d_F_issplit = ht.array(a_torch_2d, is_split=1, order="F") + BasicTest.assertTrue_memory_layout(self, a_heat_2d_C_issplit, "C") + BasicTest.assertTrue_memory_layout(self, a_heat_2d_F_issplit, "F") + a_heat_2d_F_issplit_sum = a_heat_2d_F_issplit.sum(1) + a_torch_2d_sum = a_torch_2d.sum(1) + BasicTest.assert_array_equal(self, a_heat_2d_F_split_sum, a_torch_2d_sum) # distributed, is_split, 5D a_heat_5d_C_issplit = ht.array(a_torch_5d, is_split=-2) a_heat_5d_F_issplit = ht.array(a_torch_5d, is_split=-2, order="F") BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_issplit, "C") BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_issplit, "F") + a_heat_5d_F_issplit_sum = a_heat_5d_F_issplit.sum(-2) + a_torch_5d_sum = a_torch_5d.sum(-2) * size + BasicTest.assert_array_equal(self, a_heat_5d_F_issplit_sum, a_torch_5d_sum) # test exceptions with self.assertRaises(NotImplementedError): ht.zeros_like(a_heat_5d_C_split, order="K") From 814caa997abee0f0c77d25b29e11b920ad66e375 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 2 Dec 2019 14:32:17 +0100 Subject: [PATCH 29/40] Fixed wrong variable reference after pre-commit --- heat/core/tests/test_memory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index 44ff72e35b..2653dc643c 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -61,8 +61,8 @@ def test_sanitize_memory_layout(self): BasicTest.assertTrue_memory_layout(self, a_heat_2d_C_issplit, "C") BasicTest.assertTrue_memory_layout(self, a_heat_2d_F_issplit, "F") a_heat_2d_F_issplit_sum = a_heat_2d_F_issplit.sum(1) - a_torch_2d_sum = a_torch_2d.sum(1) - BasicTest.assert_array_equal(self, a_heat_2d_F_split_sum, a_torch_2d_sum) + a_torch_2d_sum = a_torch_2d.sum(1) * size + BasicTest.assert_array_equal(self, a_heat_2d_F_issplit_sum, a_torch_2d_sum) # distributed, is_split, 5D a_heat_5d_C_issplit = ht.array(a_torch_5d, is_split=-2) a_heat_5d_F_issplit = ht.array(a_torch_5d, is_split=-2, order="F") From b659576ca9c1f9988b5e399c48b513a1b3e69d1a Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 2 Dec 2019 14:33:33 +0100 Subject: [PATCH 30/40] pre-commit changes --- heat/core/dndarray.py | 4 ++-- heat/core/factories.py | 42 +++++++++++++++++++++--------------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/heat/core/dndarray.py b/heat/core/dndarray.py index e819c5ecdb..c0865e0a6a 100644 --- a/heat/core/dndarray.py +++ b/heat/core/dndarray.py @@ -165,7 +165,7 @@ def split(self): int : the axis on which the tensor split """ return self.__split - + @property def stride(self): """ @@ -186,7 +186,7 @@ def strides(self): """ stride = np.array(self._DNDarray__array.stride()) itemsize = self._DNDarray__array.storage().element_size() - return tuple(stride*itemsize) + return tuple(stride * itemsize) @property def T(self, axes=None): 
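The two properties differ only by the item size: stride() counts steps in elements (torch semantics), while strides counts bytes (NumPy semantics). A minimal sketch, assuming a non-distributed float32 tensor (4-byte items):

>>> a = ht.zeros((4, 3), dtype=ht.float32)
>>> a.stride()
(3, 1)
>>> a.strides
(12, 4)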
diff --git a/heat/core/factories.py b/heat/core/factories.py index 858f4b27c8..7e8ee248cb 100644 --- a/heat/core/factories.py +++ b/heat/core/factories.py @@ -161,9 +161,9 @@ def array( Specifies the minimum number of dimensions that the resulting array should have. Ones will, if needed, be attached to the shape if ndim>0 and prefaced in case of ndim<0 to meet the requirement. order: str, optional - Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array - will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). - Raises NotImplementedError for NumPy options 'K' and 'A'. + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. split : None or int, optional The axis along which the passed array content obj is split and distributed in memory. Mutually exclusive with @@ -393,9 +393,9 @@ def empty(shape, dtype=types.float32, split=None, device=None, comm=None, order= comm: Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. order: str, optional - Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array - will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). - Raises NotImplementedError for NumPy options 'K' and 'A'. + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. Returns @@ -474,9 +474,9 @@ def eye(shape, dtype=types.float32, split=None, device=None, comm=None, order="C comm : Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. order: str, optional - Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array - will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). - Raises NotImplementedError for NumPy options 'K' and 'A'. + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. Returns @@ -582,9 +582,9 @@ def __factory_like(a, dtype, split, factory, device, comm, order="C", **kwargs): comm: Communication Handle to the nodes holding distributed parts or copies of this tensor. order: str, optional - Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array - will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). 
- Raises NotImplementedError for NumPy options 'K' and 'A'. + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. @@ -877,9 +877,9 @@ def ones(shape, dtype=types.float32, split=None, device=None, comm=None, order=" comm : Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. order: str, optional - Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array - will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). - Raises NotImplementedError for NumPy options 'K' and 'A'. + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. @@ -958,9 +958,9 @@ def zeros(shape, dtype=types.float32, split=None, device=None, comm=None, order= comm: Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. order: str, optional - Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array - will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). - Raises NotImplementedError for NumPy options 'K' and 'A'. + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. @@ -1002,9 +1002,9 @@ def zeros_like(a, dtype=None, split=None, device=None, comm=None, order="C"): comm: Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. order: str, optional - Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array - will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). - Raises NotImplementedError for NumPy options 'K' and 'A'. + Options: 'C' or 'F'. Specifies the memory layout of the newly created tensor. Default is order='C', meaning the array + will be stored in row-major order (C-like). If order=‘F’, the array will be stored in column-major order (Fortran-like). + Raises NotImplementedError for NumPy options 'K' and 'A'. #TODO: implement 'K' option when torch.clone() fix to preserve memory layout is released. Returns From eea901bb2dcf7f197d9a988ce535e122f08b7694 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 2 Dec 2019 14:49:50 +0100 Subject: [PATCH 31/40] Improved docs. 
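The reworded docstring generalises "rows/columns first" to arbitrary ndim. For illustration, a (2, 3, 4) tensor has element strides (12, 4, 1) in C order (the last dimension varies fastest) and (1, 2, 6) in F order (the first dimension varies fastest).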
--- heat/core/memory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/heat/core/memory.py b/heat/core/memory.py index d95e37f1dd..dc98d14ff9 100644 --- a/heat/core/memory.py +++ b/heat/core/memory.py @@ -37,8 +37,8 @@ def sanitize_memory_layout(x, order="C"): Input data order: str, optional. - Default is 'C' as in C-like (row-major) memory layout. The array is stored in memory rows first. - Alternative is 'F', as in Fortran-like (column-major) memory layout. The array is stored columns first. + Default is 'C' as in C-like (row-major) memory layout. The array is stored first dimension first (rows first if ndim=2). + Alternative is 'F', as in Fortran-like (column-major) memory layout. The array is stored last dimension first (columns first if ndim=2). """ if x.ndim < 2: # do nothing From c38dedec26a2ea89c3d16cbf75f92d2fa214dd57 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Fri, 6 Dec 2019 13:23:55 +0100 Subject: [PATCH 32/40] Extending BasicTest in TestMemory --- heat/core/tests/test_memory.py | 38 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/heat/core/tests/test_memory.py b/heat/core/tests/test_memory.py index 2653dc643c..22fb3a4a55 100644 --- a/heat/core/tests/test_memory.py +++ b/heat/core/tests/test_memory.py @@ -6,7 +6,7 @@ from heat.core.tests.test_suites.basic_test import BasicTest -class TestMemory(unittest.TestCase): +class TestMemory(BasicTest): def test_copy(self): tensor = ht.ones(5) copied = tensor.copy() @@ -25,52 +25,52 @@ def test_sanitize_memory_layout(self): a_torch = torch.arange(12).reshape(4, 3) a_heat_C = ht.array(a_torch) a_heat_F = ht.array(a_torch, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_C, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_F, "F") + self.assertTrue_memory_layout(a_heat_C, "C") + self.assertTrue_memory_layout(a_heat_F, "F") # non distributed, 5D a_torch_5d = torch.arange(4 * 3 * 5 * 2 * 1).reshape(4, 3, 1, 2, 5) a_heat_5d_C = ht.array(a_torch_5d) a_heat_5d_F = ht.array(a_torch_5d, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_5d_C, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_5d_F, "F") + self.assertTrue_memory_layout(a_heat_5d_C, "C") + self.assertTrue_memory_layout(a_heat_5d_F, "F") a_heat_5d_F_sum = a_heat_5d_F.sum(-2) a_torch_5d_sum = a_torch_5d.sum(-2) - BasicTest.assert_array_equal(self, a_heat_5d_F_sum, a_torch_5d_sum) + self.assert_array_equal(a_heat_5d_F_sum, a_torch_5d_sum) # distributed, split, 2D - size = BasicTest.get_size(a_heat_5d_C) + size = ht.communication.MPI_WORLD.size a_torch_2d = torch.arange(4 * size * 3 * size).reshape(4 * size, 3 * size) a_heat_2d_C_split = ht.array(a_torch_2d, split=0) a_heat_2d_F_split = ht.array(a_torch_2d, split=1, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_2d_C_split, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_2d_F_split, "F") + self.assertTrue_memory_layout(a_heat_2d_C_split, "C") + self.assertTrue_memory_layout(a_heat_2d_F_split, "F") a_heat_2d_F_split_sum = a_heat_2d_F_split.sum(1) a_torch_2d_sum = a_torch_2d.sum(1) - BasicTest.assert_array_equal(self, a_heat_2d_F_split_sum, a_torch_2d_sum) + self.assert_array_equal(a_heat_2d_F_split_sum, a_torch_2d_sum) # distributed, split, 5D a_torch_5d = torch.arange(4 * 3 * 5 * 2 * size * 7).reshape(4, 3, 7, 2 * size, 5) a_heat_5d_C_split = ht.array(a_torch_5d, split=-2) a_heat_5d_F_split = ht.array(a_torch_5d, split=-2, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_split, "C") - 
BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_split, "F") + self.assertTrue_memory_layout(a_heat_5d_C_split, "C") + self.assertTrue_memory_layout(a_heat_5d_F_split, "F") a_heat_5d_F_split_sum = a_heat_5d_F_split.sum(-2) a_torch_5d_sum = a_torch_5d.sum(-2) - BasicTest.assert_array_equal(self, a_heat_5d_F_split_sum, a_torch_5d_sum) + self.assert_array_equal(a_heat_5d_F_split_sum, a_torch_5d_sum) # distributed, is_split, 2D a_heat_2d_C_issplit = ht.array(a_torch_2d, is_split=0) a_heat_2d_F_issplit = ht.array(a_torch_2d, is_split=1, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_2d_C_issplit, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_2d_F_issplit, "F") + self.assertTrue_memory_layout(a_heat_2d_C_issplit, "C") + self.assertTrue_memory_layout(a_heat_2d_F_issplit, "F") a_heat_2d_F_issplit_sum = a_heat_2d_F_issplit.sum(1) a_torch_2d_sum = a_torch_2d.sum(1) * size - BasicTest.assert_array_equal(self, a_heat_2d_F_issplit_sum, a_torch_2d_sum) + self.assert_array_equal(a_heat_2d_F_issplit_sum, a_torch_2d_sum) # distributed, is_split, 5D a_heat_5d_C_issplit = ht.array(a_torch_5d, is_split=-2) a_heat_5d_F_issplit = ht.array(a_torch_5d, is_split=-2, order="F") - BasicTest.assertTrue_memory_layout(self, a_heat_5d_C_issplit, "C") - BasicTest.assertTrue_memory_layout(self, a_heat_5d_F_issplit, "F") + self.assertTrue_memory_layout(a_heat_5d_C_issplit, "C") + self.assertTrue_memory_layout(a_heat_5d_F_issplit, "F") a_heat_5d_F_issplit_sum = a_heat_5d_F_issplit.sum(-2) a_torch_5d_sum = a_torch_5d.sum(-2) * size - BasicTest.assert_array_equal(self, a_heat_5d_F_issplit_sum, a_torch_5d_sum) + self.assert_array_equal(a_heat_5d_F_issplit_sum, a_torch_5d_sum) # test exceptions with self.assertRaises(NotImplementedError): ht.zeros_like(a_heat_5d_C_split, order="K") From e7d0090faeef9433b89bc767e353f24582953d44 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Sat, 7 Dec 2019 07:46:31 +0100 Subject: [PATCH 33/40] Re-adding assertTrue_memory_layout to BasicTest --- heat/core/tests/test_suites/basic_test.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/heat/core/tests/test_suites/basic_test.py b/heat/core/tests/test_suites/basic_test.py index a2c661d6ec..3d522f1238 100644 --- a/heat/core/tests/test_suites/basic_test.py +++ b/heat/core/tests/test_suites/basic_test.py @@ -291,6 +291,24 @@ def assert_func_equal_for_tensor( else: self.assertTrue(np.array_equal(ht_res._DNDarray__array.numpy(), np_res)) + def assertTrue_memory_layout(self, tensor, order): + """ + Checks that the memory layout of a given heat tensor is as specified by argument order. + + Parameters: + ----------- + order: str, 'C' for C-like (row-major), 'F' for Fortran-like (column-major) memory layout. + """ + stride = tensor._DNDarray__array.stride() + row_major = all(np.diff(list(stride)) <= 0) + column_major = all(np.diff(list(stride)) >= 0) + if order == "C": + return self.assertTrue(row_major) + elif order == "F": + return self.assertTrue(column_major) + else: + raise ValueError("expected order to be 'C' or 'F', but was {}".format(order)) + def __create_random_np_array(self, shape, dtype=np.float64, low=-10000, high=10000): """ Creates a random array based on the input parameters. 
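A minimal sketch of how the re-added helper is meant to be called from a test case (the class name and tensor below are only an example; the import path follows the patches in this series):

import heat as ht
from heat.core.tests.test_suites.basic_test import BasicTest

class TestLayout(BasicTest):
    def test_layout(self):
        x = ht.zeros((4, 3), order="F")   # column-major memory layout
        self.assertTrue_memory_layout(x, "F")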
From 3bb13ba403f401ca19266e37bca90e04bf7a66d7 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 9 Dec 2019 11:35:42 +0100 Subject: [PATCH 34/40] Function test_dndarray.test_stride_and_strides(), first pass, --- heat/core/tests/test_dndarray.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/heat/core/tests/test_dndarray.py b/heat/core/tests/test_dndarray.py index ddf1a9a275..c6ad5819c3 100644 --- a/heat/core/tests/test_dndarray.py +++ b/heat/core/tests/test_dndarray.py @@ -709,3 +709,10 @@ def test_size_gnumel(self): self.assertEqual(a.gnumel, 10 * 10 * 10) self.assertEqual(ht.array(0).size, 1) + + def test_stride_and_strides(self): + torch_int32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.int32).reshape(6, 5, 3, 4, 5, 7) + heat_int32 = ht.array(torch_int32) + numpy_int32 = torch_int32.numpy() + self.assertEqual(heat_int32.stride(), torch_int32.stride()) + From 218345bcb8620cfcebd64756cb4240ae7692b208 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 9 Dec 2019 11:45:57 +0100 Subject: [PATCH 35/40] Added float32, row-major and float64, column-major stride tests --- heat/core/tests/test_dndarray.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/heat/core/tests/test_dndarray.py b/heat/core/tests/test_dndarray.py index c6ad5819c3..42bb0fb266 100644 --- a/heat/core/tests/test_dndarray.py +++ b/heat/core/tests/test_dndarray.py @@ -711,8 +711,21 @@ def test_size_gnumel(self): self.assertEqual(ht.array(0).size, 1) def test_stride_and_strides(self): - torch_int32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.int32).reshape(6, 5, 3, 4, 5, 7) - heat_int32 = ht.array(torch_int32) - numpy_int32 = torch_int32.numpy() - self.assertEqual(heat_int32.stride(), torch_int32.stride()) - + #Local, int16, row-major memory layout + torch_int16 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.int16).reshape(6, 5, 3, 4, 5, 7) + heat_int16 = ht.array(torch_int16) + numpy_int16 = torch_int16.numpy() + self.assertEqual(heat_int16.stride(), torch_int16.stride()) + self.assertEqual(heat_int16.strides, numpy_int16.strides) + #Local, float32, row-major memory layout + torch_float32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.float32).reshape(6, 5, 3, 4, 5, 7) + heat_float32 = ht.array(torch_float32) + numpy_float32 = torch_float32.numpy() + self.assertEqual(heat_float32.stride(), torch_float32.stride()) + self.assertEqual(heat_float32.strides, numpy_float32.strides) + #Local, float64, column-major memory layout + torch_float64 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.float64).reshape(6, 5, 3, 4, 5, 7) + heat_float64_F = ht.array(torch_float64, order='F') + numpy_float64_F = np.array(torch_float64.numpy(), order='F') + self.assertNotEqual(heat_float64_F.stride(), torch_float64.stride()) + self.assertEqual(heat_float64_F.strides, numpy_float64_F.strides) \ No newline at end of file From d3a7499f2d40f86702724dbe07ba3142ad1bd0f9 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 9 Dec 2019 13:53:45 +0100 Subject: [PATCH 36/40] test_stride_and_strides: Added test cases for distributed tensors in row-major and column-major order --- heat/core/tests/test_dndarray.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/heat/core/tests/test_dndarray.py b/heat/core/tests/test_dndarray.py index 42bb0fb266..d38b878883 100644 --- a/heat/core/tests/test_dndarray.py +++ b/heat/core/tests/test_dndarray.py @@ -728,4 +728,28 @@ def test_stride_and_strides(self): heat_float64_F = ht.array(torch_float64, 
order='F') numpy_float64_F = np.array(torch_float64.numpy(), order='F') self.assertNotEqual(heat_float64_F.stride(), torch_float64.stride()) - self.assertEqual(heat_float64_F.strides, numpy_float64_F.strides) \ No newline at end of file + self.assertEqual(heat_float64_F.strides, numpy_float64_F.strides) + #Distributed, int16, row-major memory layout + size = ht.communication.MPI_WORLD.size + split= 2 + torch_int16 = torch.arange(6 * 5 * 3*size * 4 * 5 * 7, dtype=torch.int16).reshape(6, 5, 3*size, 4, 5, 7) + heat_int16_split = ht.array(torch_int16, split=split) + numpy_int16 = torch_int16.numpy() + if size > 1: + self.assertNotEqual(heat_int16_split.stride(), torch_int16.stride()) + numpy_int16_split_strides = tuple(np.array(numpy_int16.strides[:split])/size) + numpy_int16.strides[split:] + self.assertEqual(heat_int16_split.strides, numpy_int16_split_strides) + # Distributed, float32, row-major memory layout + split= -1 + torch_float32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7*size, dtype=torch.float32).reshape(6, 5, 3, 4, 5, 7*size) + heat_float32_split = ht.array(torch_float32, split=split) + numpy_float32 = torch_float32.numpy() + numpy_float32_split_strides = tuple(np.array(numpy_float32.strides[:split])/size) + numpy_float32.strides[split:] + self.assertEqual(heat_float32_split.strides, numpy_float32_split_strides) + #Distributed, float64, column-major memory layout + split=-2 + torch_float64 = torch.arange(6 * 5 * 3 * 4 * 5*size * 7, dtype=torch.float64).reshape(6, 5, 3, 4, 5*size, 7) + heat_float64_F_split = ht.array(torch_float64, order='F', split=split) + numpy_float64_F = np.array(torch_float64.numpy(), order='F') + numpy_float64_F_split_strides = numpy_float64_F.strides[:split+1] + tuple(np.array(numpy_float64_F.strides[split+1:])/size) + self.assertEqual(heat_float64_F_split.strides, numpy_float64_F_split_strides) \ No newline at end of file From ae00b59531f43f8ea423c8e07e7707e2028a2902 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 9 Dec 2019 13:55:56 +0100 Subject: [PATCH 37/40] pre-commit reformatting --- heat/core/tests/test_dndarray.py | 64 ++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/heat/core/tests/test_dndarray.py b/heat/core/tests/test_dndarray.py index d38b878883..07a420d54e 100644 --- a/heat/core/tests/test_dndarray.py +++ b/heat/core/tests/test_dndarray.py @@ -709,47 +709,65 @@ def test_size_gnumel(self): self.assertEqual(a.gnumel, 10 * 10 * 10) self.assertEqual(ht.array(0).size, 1) - + def test_stride_and_strides(self): - #Local, int16, row-major memory layout - torch_int16 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.int16).reshape(6, 5, 3, 4, 5, 7) + # Local, int16, row-major memory layout + torch_int16 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.int16).reshape( + 6, 5, 3, 4, 5, 7 + ) heat_int16 = ht.array(torch_int16) numpy_int16 = torch_int16.numpy() self.assertEqual(heat_int16.stride(), torch_int16.stride()) self.assertEqual(heat_int16.strides, numpy_int16.strides) - #Local, float32, row-major memory layout - torch_float32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.float32).reshape(6, 5, 3, 4, 5, 7) + # Local, float32, row-major memory layout + torch_float32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.float32).reshape( + 6, 5, 3, 4, 5, 7 + ) heat_float32 = ht.array(torch_float32) numpy_float32 = torch_float32.numpy() self.assertEqual(heat_float32.stride(), torch_float32.stride()) self.assertEqual(heat_float32.strides, numpy_float32.strides) - #Local, float64, column-major memory 
layout - torch_float64 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.float64).reshape(6, 5, 3, 4, 5, 7) - heat_float64_F = ht.array(torch_float64, order='F') - numpy_float64_F = np.array(torch_float64.numpy(), order='F') + # Local, float64, column-major memory layout + torch_float64 = torch.arange(6 * 5 * 3 * 4 * 5 * 7, dtype=torch.float64).reshape( + 6, 5, 3, 4, 5, 7 + ) + heat_float64_F = ht.array(torch_float64, order="F") + numpy_float64_F = np.array(torch_float64.numpy(), order="F") self.assertNotEqual(heat_float64_F.stride(), torch_float64.stride()) self.assertEqual(heat_float64_F.strides, numpy_float64_F.strides) - #Distributed, int16, row-major memory layout + # Distributed, int16, row-major memory layout size = ht.communication.MPI_WORLD.size - split= 2 - torch_int16 = torch.arange(6 * 5 * 3*size * 4 * 5 * 7, dtype=torch.int16).reshape(6, 5, 3*size, 4, 5, 7) + split = 2 + torch_int16 = torch.arange(6 * 5 * 3 * size * 4 * 5 * 7, dtype=torch.int16).reshape( + 6, 5, 3 * size, 4, 5, 7 + ) heat_int16_split = ht.array(torch_int16, split=split) numpy_int16 = torch_int16.numpy() if size > 1: self.assertNotEqual(heat_int16_split.stride(), torch_int16.stride()) - numpy_int16_split_strides = tuple(np.array(numpy_int16.strides[:split])/size) + numpy_int16.strides[split:] + numpy_int16_split_strides = ( + tuple(np.array(numpy_int16.strides[:split]) / size) + numpy_int16.strides[split:] + ) self.assertEqual(heat_int16_split.strides, numpy_int16_split_strides) # Distributed, float32, row-major memory layout - split= -1 - torch_float32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7*size, dtype=torch.float32).reshape(6, 5, 3, 4, 5, 7*size) + split = -1 + torch_float32 = torch.arange(6 * 5 * 3 * 4 * 5 * 7 * size, dtype=torch.float32).reshape( + 6, 5, 3, 4, 5, 7 * size + ) heat_float32_split = ht.array(torch_float32, split=split) numpy_float32 = torch_float32.numpy() - numpy_float32_split_strides = tuple(np.array(numpy_float32.strides[:split])/size) + numpy_float32.strides[split:] + numpy_float32_split_strides = ( + tuple(np.array(numpy_float32.strides[:split]) / size) + numpy_float32.strides[split:] + ) self.assertEqual(heat_float32_split.strides, numpy_float32_split_strides) - #Distributed, float64, column-major memory layout - split=-2 - torch_float64 = torch.arange(6 * 5 * 3 * 4 * 5*size * 7, dtype=torch.float64).reshape(6, 5, 3, 4, 5*size, 7) - heat_float64_F_split = ht.array(torch_float64, order='F', split=split) - numpy_float64_F = np.array(torch_float64.numpy(), order='F') - numpy_float64_F_split_strides = numpy_float64_F.strides[:split+1] + tuple(np.array(numpy_float64_F.strides[split+1:])/size) - self.assertEqual(heat_float64_F_split.strides, numpy_float64_F_split_strides) \ No newline at end of file + # Distributed, float64, column-major memory layout + split = -2 + torch_float64 = torch.arange(6 * 5 * 3 * 4 * 5 * size * 7, dtype=torch.float64).reshape( + 6, 5, 3, 4, 5 * size, 7 + ) + heat_float64_F_split = ht.array(torch_float64, order="F", split=split) + numpy_float64_F = np.array(torch_float64.numpy(), order="F") + numpy_float64_F_split_strides = numpy_float64_F.strides[: split + 1] + tuple( + np.array(numpy_float64_F.strides[split + 1 :]) / size + ) + self.assertEqual(heat_float64_F_split.strides, numpy_float64_F_split_strides) From fbe9738b68d85edc5b6bac315adfa15d06861a8c Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Mon, 9 Dec 2019 14:12:14 +0100 Subject: [PATCH 38/40] Added function test_basic_test.test_assertTrue_memory_layout() --- heat/core/tests/test_suites/test_basic_test.py | 
6 ++++++ 1 file changed, 6 insertions(+) diff --git a/heat/core/tests/test_suites/test_basic_test.py b/heat/core/tests/test_suites/test_basic_test.py index eaf6f3a1b7..6fc536226f 100644 --- a/heat/core/tests/test_suites/test_basic_test.py +++ b/heat/core/tests/test_suites/test_basic_test.py @@ -85,3 +85,9 @@ def test_assert_func_equal_for_tensor(self): array = ht.ones((15, 15)) with self.assertRaises(TypeError): self.assert_func_equal_for_tensor(array, heat_func=ht_func, numpy_func=np_func) + + def test_assertTrue_memory_layout(self): + data = torch.arange(3 * 4 * 5).reshape(3, 4, 5) + data_F = ht.array(data, order="F") + with self.assertRaises(ValueError): + self.assertTrue_memory_layout(data_F, order="K") From 38802e8011f707cb98d936bbf30a6d3a3ca14741 Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Tue, 10 Dec 2019 09:12:32 +0100 Subject: [PATCH 39/40] dndrray.strides, replaced np.array(...)*itemsize with list comprehension. --- heat/core/dndarray.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/heat/core/dndarray.py b/heat/core/dndarray.py index 57a28b6dba..e4754350dd 100644 --- a/heat/core/dndarray.py +++ b/heat/core/dndarray.py @@ -184,9 +184,10 @@ def strides(self): tuple of ints: bytes to step in each dimension when traversing a tensor. numpy-like usage: self.strides """ - stride = np.array(self._DNDarray__array.stride()) + steps = list(self._DNDarray__array.stride()) itemsize = self._DNDarray__array.storage().element_size() - return tuple(stride * itemsize) + strides = tuple(step*itemsize for step in steps) + return strides @property def T(self, axes=None): From 46fbc2e080e9ac7560267bd4923e107ba580cb0a Mon Sep 17 00:00:00 2001 From: Claudia Comito Date: Tue, 10 Dec 2019 09:14:58 +0100 Subject: [PATCH 40/40] pre-commit minor changes --- heat/core/dndarray.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heat/core/dndarray.py b/heat/core/dndarray.py index e4754350dd..3cf2ab0eac 100644 --- a/heat/core/dndarray.py +++ b/heat/core/dndarray.py @@ -186,7 +186,7 @@ def strides(self): """ steps = list(self._DNDarray__array.stride()) itemsize = self._DNDarray__array.storage().element_size() - strides = tuple(step*itemsize for step in steps) + strides = tuple(step * itemsize for step in steps) return strides @property
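For distributed tensors, strides (like stride()) describes the process-local chunk. The stride tests above reconstruct it from the global NumPy strides; a sketch of that relationship for a C-ordered tensor, assuming the split dimension divides evenly across processes (global_strides, split and size are illustrative names, with size the number of MPI processes):

    # byte steps that span the split axis shrink by the factor `size`;
    # in C order these belong to the dimensions before the split axis
    local_strides = tuple(s // size for s in global_strides[:split]) + global_strides[split:]

For an F-ordered tensor the strides of the dimensions after the split axis shrink instead, as in the float64 test case above.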