Merge pull request #785 from helmholtz-analytics/bug/769-resplit-after-index

Change MPI buffer creation + 1D non-contiguous tensors
coquelin77 authored Jun 7, 2021
2 parents 3dc9aca + 1a75eb2 commit 184f112
Showing 4 changed files with 57 additions and 16 deletions.
8 changes: 3 additions & 5 deletions CHANGELOG.md
@@ -38,17 +38,15 @@ Example on 2 processes:

## Bug Fixes
- [#758](https://github.com/helmholtz-analytics/heat/pull/758) Fix indexing inconsistencies in `DNDarray.__getitem__()`
+- [#768](https://github.com/helmholtz-analytics/heat/pull/768) Fixed an issue where `deg2rad` and `rad2deg` were not working with the `out` parameter.
+- [#785](https://github.com/helmholtz-analytics/heat/pull/785) Removed `storage_offset` when finding the MPI buffer (`communication.MPICommunication.as_mpi_memory()`).
+- [#785](https://github.com/helmholtz-analytics/heat/pull/785) Added support for 1-dimensional non-contiguous local tensors in `communication.MPICommunication.mpi_type_and_elements_of()`.
### Linear Algebra
- [#718](https://github.com/helmholtz-analytics/heat/pull/718) New feature: `trace()`
- [#768](https://github.com/helmholtz-analytics/heat/pull/768) New feature: unary positive and negative operations
### Misc.
- [#761](https://github.com/helmholtz-analytics/heat/pull/761) New feature: `result_type`

## Breaking Changes

-## Bug fixes
-- [#768](https://github.com/helmholtz-analytics/heat/pull/768) Fixed an issue where `deg2rad` and `rad2deg` were not working with the `out` parameter.


# v1.0.0

21 changes: 14 additions & 7 deletions heat/core/communication.py
@@ -301,10 +301,7 @@ def as_mpi_memory(cls, obj) -> MPI.memory:
        obj : torch.Tensor
            The tensor to be converted into a MPI memory view.
        """
-        pointer = obj.data_ptr()
-        pointer += obj.storage_offset()
-
-        return MPI.memory.fromaddress(pointer, 0)
+        return MPI.memory.fromaddress(obj.data_ptr(), 0)

    @classmethod
    def as_buffer(

@@ -322,9 +319,19 @@ def as_buffer(
        displs : Tuple[int,...], optional
            Optional displacements arguments for variable MPI-calls (e.g. Alltoallv)
        """
+        squ = False
+        if not obj.is_contiguous() and obj.ndim == 1:
+            # temporarily add a trailing dimension so the element/type math in
+            # mpi_type_and_elements_of works for a non-contiguous 1D tensor
+            obj.unsqueeze_(-1)
+            squ = True
        mpi_type, elements = cls.mpi_type_and_elements_of(obj, counts, displs)

-        return [cls.as_mpi_memory(obj), elements, mpi_type]
+        mpi_mem = cls.as_mpi_memory(obj)
+        if squ:
+            # undo the unsqueeze_ from above, restoring the tensor's original 1D shape
+            obj.squeeze_(-1)
+        return [mpi_mem, elements, mpi_type]
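
To illustrate the case this branch handles (example only, not part of the commit): a 1D view such as a matrix column is non-contiguous because consecutive elements sit a full row-stride apart in storage. `unsqueeze_(-1)` reshapes it to 2D without copying, which, as the in-code comment suggests, lets the element/type construction in `mpi_type_and_elements_of` describe the strided layout:

# example only -- not part of the commit
import torch

m = torch.arange(12).reshape(3, 4)
col = m[:, 1]                       # 1D view over storage elements 1, 5, 9

print(col.is_contiguous())          # False: stride is (4,), elements are not adjacent
col2d = col.unsqueeze(-1)           # out-of-place variant of the unsqueeze_ above
print(col2d.shape, col2d.stride())  # torch.Size([3, 1]) (4, 1) -- still no copy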

    def alltoall_sendbuffer(
        self, obj: torch.Tensor

@@ -338,7 +345,7 @@ def alltoall_sendbuffer(
        obj: torch.Tensor
            The object to be transformed into a custom MPI datatype
        """
-        mpi_type, _ = self.__mpi_type_mappings[obj.dtype], torch.numel(obj)
+        mpi_type = self.__mpi_type_mappings[obj.dtype]

        nproc = self.size
        shape = obj.shape
@@ -1234,7 +1241,7 @@ def __alltoall_like(
        # keep a reference to the original buffer object
        original_recvbuf = recvbuf

-        # Simple case, continuos buffers can be transmitted as is
+        # Simple case, continuous buffers can be transmitted as is
        if send_axis < 2 and recv_axis < 2:
            send_axis_permutation = list(range(recvbuf.ndimension()))
            recv_axis_permutation = list(range(recvbuf.ndimension()))
30 changes: 30 additions & 0 deletions heat/core/manipulations.py
@@ -23,6 +23,7 @@
from . import _operations

__all__ = [
+    "balance",
    "column_stack",
    "concatenate",
    "diag",

@@ -54,6 +55,35 @@
]


+def balance(array: DNDarray, copy=False) -> DNDarray:
+    """
+    Out-of-place balance function. More information on the meaning of balance can be found in
+    :func:`DNDarray.balance_() <heat.core.dndarray.DNDarray.balance_()>`.
+
+    Parameters
+    ----------
+    array : DNDarray
+        the DNDarray to be balanced
+    copy : bool, optional
+        whether the DNDarray should be copied before being balanced. If ``False`` (the default),
+        the original array is balanced and returned; if ``True``, a balanced copy of the array
+        is returned.
+
+    Returns
+    -------
+    balanced : DNDarray
+        The balanced DNDarray
+    """
+    cpy = array.copy() if copy else array
+    cpy.balance_()
+    return cpy
+
+
+DNDarray.balance = lambda self, copy=False: balance(self, copy)
+DNDarray.balance.__doc__ = balance.__doc__
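
A short usage sketch of the new out-of-place API (example only, not part of the commit; shapes are arbitrary): slicing a distributed array can leave processes unevenly loaded, and `ht.balance` or the bound `DNDarray.balance` redistributes the data, optionally on a copy:

# example only -- not part of the commit
import heat as ht

x = ht.zeros((70, 20), split=0)  # distributed along axis 0
y = x[:50]                       # slicing may unbalance the process-local shares

z = ht.balance(y, copy=True)     # balanced copy; y itself is untouched
y.balance()                      # copy=False (default): balances y in place and returns it
print(z.is_balanced())           # True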


def column_stack(arrays: Sequence[DNDarray, ...]) -> DNDarray:
    """
    Stack 1-D or 2-D `DNDarray`s as columns into a 2-D `DNDarray`.
14 changes: 10 additions & 4 deletions heat/core/tests/test_dndarray.py
@@ -262,13 +262,11 @@ def test_balance_and_lshape_map(self):
        self.assertTrue(data.is_balanced())

        data = ht.zeros((70, 20), split=0, dtype=ht.float64)
-        data = data[:50]
-        data.balance_()
+        data = ht.balance(data[:50], copy=True)
        self.assertTrue(data.is_balanced())

        data = ht.zeros((4, 120), split=1, dtype=ht.int64)
-        data = data[:, 40:70]
-        data.balance_()
+        data = data[:, 40:70].balance()
        self.assertTrue(data.is_balanced())

        data = np.loadtxt("heat/datasets/iris.csv", delimiter=";")
@@ -968,6 +966,14 @@ def test_resplit(self):
            self.assertEqual(resplit_a.dtype, ht.int64)
            del a

+        # 1D non-contiguous resplit testing
+        t1 = ht.arange(10 * 10, split=0).reshape((10, 10))
+        t1_sub = t1[:, 1]  # .expand_dims(0)
+        res = ht.array([1, 11, 21, 31, 41, 51, 61, 71, 81, 91])
+        t1_sub.resplit_(axis=None)
+        self.assertTrue(ht.all(t1_sub == res))
+        self.assertEqual(t1_sub.split, None)
+
    def test_rshift(self):
        int_tensor = ht.array([[0, 2], [4, 8]])
        int_result = ht.array([[0, 0], [1, 2]])
