Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor reshape #614

Merged
merged 3 commits into from
Mar 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 10 additions & 4 deletions docs/framework/operators/tensor/tensor.reshape.md
Original file line number Diff line number Diff line change
@@ -1,15 +1,21 @@
# tensor.reshape

```rust
fn reshape(self: @Tensor<T>, target_shape: Span<usize>) -> Tensor<T>;
fn reshape(self: @Tensor<T>, target_shape: Span<i32>, allowzero: bool) -> Tensor<T>;
```

Returns a new tensor with the specified target shape and the same data as the input tensor.
Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second
input is a shape tensor which specifies the output shape. It outputs the reshaped tensor.
At most one dimension of the new shape can be -1. In this case, the value is inferred from
the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case
the actual dimension value is unchanged (i.e. taken from the input tensor). If 'allowzero' is set,
and the new shape includes 0, the dimension will be set explicitly to zero (i.e. not taken from input tensor)

## Args

* `self`(`@Tensor<T>`) - The input tensor.
* `target_shape`(Span<usize>) - A span containing the target shape of the tensor.
* `target_shape`(Span<i32>) - A span containing the target shape of the tensor.
* `allowzero`(`bool`) - Indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy.

## Panics

Expand All @@ -32,7 +38,7 @@ fn reshape_tensor_example() -> Tensor<u32> {
);

// We can call `reshape` function as follows.
return tensor.reshape(target_shape: array![2, 4].span());
return tensor.reshape(target_shape: array![2, 4].span(), false);
}
>>> [[0,1,2,3], [4,5,6,7]]
```
136 changes: 136 additions & 0 deletions nodegen/node/reshape.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, Tensor, Dtype

# Shared input for most test cases: a 2x3x4 tensor of random integers.
# NOTE(fix): the original used np.random.random_sample (floats in [0, 1))
# cast to int32, which truncates every element to 0 — with all-zero data the
# generated reshape tests pass regardless of element ordering, so they could
# not catch reordering bugs. Draw genuine random integers instead.
original_shape = [2, 3, 4]
data = np.random.randint(-127, 127, original_shape).astype(np.int32)


def reshape_reference_implementation(
    data: np.ndarray, shape: np.ndarray, allowzero: int = 0
) -> np.ndarray:
    """Mirror ONNX Reshape semantics on top of numpy.reshape.

    With allowzero == 0 (the ONNX default), a 0 entry in `shape` means
    "keep the corresponding dimension of `data`"; np.reshape does not do
    that substitution itself, so it is performed here first. With
    allowzero != 0 the shape is passed through untouched and any 0 is
    honored literally. A single -1 entry is resolved by np.reshape.
    """
    if allowzero == 0:
        input_dims = data.shape
        # Replace every 0 with the matching input dimension before reshaping.
        target = np.array(
            [input_dims[i] if dim == 0 else dim for i, dim in enumerate(shape)],
            dtype=shape.dtype,
        )
    else:
        target = np.copy(shape)
    return np.reshape(data, target)


class Reshape(RunAll):
    """Generates ONNX-style Reshape test cases for the Cairo tensor library.

    Each static method computes the expected output with the numpy reference
    implementation, then emits a Cairo test that calls
    `input_0.reshape(target_shape, allowzero)` and compares against it.
    All methods (except the allowzero one) share the module-level `data`.
    """

    @staticmethod
    def reshape_reordered_all_dims():
        # Permute all three dimensions: (2, 3, 4) -> (4, 2, 3).
        y = reshape_reference_implementation(
            data, np.array([4, 2, 3], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_reordered_all_dims"
        make_test([x], y, "input_0.reshape(array![4,2,3].span(), false)", name)

    @staticmethod
    def reshape_reordered_last_dims():
        # Swap only the trailing dimensions: (2, 3, 4) -> (2, 4, 3).
        y = reshape_reference_implementation(
            data, np.array([2, 4, 3], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_reordered_last_dims"
        make_test([x], y, "input_0.reshape(array![2,4,3].span(), false)", name)

    @staticmethod
    def reshape_reduced_dims():
        # Collapse the last two dimensions: (2, 3, 4) -> (2, 12).
        y = reshape_reference_implementation(
            data, np.array([2, 12], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_reduced_dims"
        make_test([x], y, "input_0.reshape(array![2,12].span(), false)", name)

    @staticmethod
    def reshape_extended_dims():
        # Split the last dimension: (2, 3, 4) -> (2, 3, 2, 2).
        y = reshape_reference_implementation(
            data, np.array([2, 3, 2, 2], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_extended_dims"
        make_test([x], y, "input_0.reshape(array![2, 3, 2, 2].span(), false)", name)

    @staticmethod
    def reshape_one_dim():
        # Flatten everything into a single dimension of 24 elements.
        y = reshape_reference_implementation(
            data, np.array([24], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_one_dim"
        make_test([x], y, "input_0.reshape(array![24].span(), false)", name)

    @staticmethod
    def reshape_negative_dim():
        # -1 infers the middle dimension: (2, 3, 4) -> (2, 6, 2).
        y = reshape_reference_implementation(
            data, np.array([2, -1, 2], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_negative_dim"
        make_test([x], y, "input_0.reshape(array![2, -1, 2].span(), false)", name)

    @staticmethod
    def reshape_negative_extended_dims():
        # -1 infers the leading dimension: (2, 3, 4) -> (1, 2, 3, 4).
        y = reshape_reference_implementation(
            data, np.array([-1, 2, 3, 4], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_negative_extended_dims"
        make_test([x], y, "input_0.reshape(array![-1, 2, 3, 4].span(), false)", name)

    @staticmethod
    def reshape_zero_dim():
        # With allowzero=false, 0 copies the input dim: (2, 3, 4) -> (2, 3, 4, 1).
        y = reshape_reference_implementation(
            data, np.array([2, 0, 4, 1], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_zero_dim"
        make_test([x], y, "input_0.reshape(array![2, 0, 4, 1].span(), false)", name)

    @staticmethod
    def reshape_zero_and_negative_dim():
        # Combine a copied dim (0) and an inferred dim (-1): -> (2, 3, 1, 4).
        y = reshape_reference_implementation(
            data, np.array([2, 0, 1, -1], dtype=np.int64))

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_zero_and_negative_dim"
        make_test([x], y, "input_0.reshape(array![2, 0, 1, -1].span(), false)", name)

    @staticmethod
    def reshape_zero_dim_allowzero():
        # NOTE(fix): this method was originally also named
        # `reshape_zero_and_negative_dim`, which shadowed the method above —
        # the earlier test was never generated and both emitted the same test
        # name. Renamed (method and emitted name) to reflect its purpose:
        # with allowzero=true a 0 in the target shape is honored literally.
        original_shape = [0, 3, 4]
        data = np.random.random_sample(original_shape).astype(np.int32)

        y = reshape_reference_implementation(
            data, np.array([3, 4, 0], dtype=np.int64), allowzero=1)

        x = Tensor(Dtype.I32, data.shape, data.flatten())
        y = Tensor(Dtype.I32, y.shape, y.flatten())

        name = "reshape_zero_dim_allowzero"
        make_test([x], y, "input_0.reshape(array![3, 4, 0].span(), true)", name)


66 changes: 38 additions & 28 deletions src/operators/nn/functional/col2im.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -56,43 +56,53 @@ fn col2im<T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Ad
let bl = prod(block_shape, 0);
let C = *(*data).shape.at(1) / bl;

let mut new_shape = array![*(*data).shape.at(0), C, bl];
let mut new_shape: Array<i32> = array![
(*(*data).shape.at(0)).try_into().unwrap(), C.try_into().unwrap(), bl.try_into().unwrap()
];
let mut i = 2;
while i != (*data).shape.len() {
new_shape.append(*(*data).shape.at(i));
i += 1;
};
while i != (*data)
.shape
.len() {
new_shape.append((*(*data).shape.at(i)).try_into().unwrap());
i += 1;
};

let data = data.reshape(new_shape.span());
let data = data.reshape(new_shape.span(), false);

let mut res: Array<T> = array![];
let data_stride = stride(data.shape);

let mut n = 0;
while n != *data.shape.at(0) {
let mut c = 0;
while c != *data.shape.at(1) {
let data_n_c = TensorTrait::new(
SpanTrait::slice(data.shape, 2, data.shape.len() - 2),
SpanTrait::slice(
data.data, n * *data_stride.at(0) + c * *data_stride.at(1), *data_stride.at(1)
)
);
let mut out = col2im_naive_implementation(
@data_n_c, image_shape, block_shape, dilations, pads, strides
);
let mut i = 0;
while i != out.len() {
res.append(out.at(i));
i += 1;
};
while n != *data
.shape
.at(0) {
let mut c = 0;
while c != *data
.shape
.at(1) {
let data_n_c = TensorTrait::new(
SpanTrait::slice(data.shape, 2, data.shape.len() - 2),
SpanTrait::slice(
data.data,
n * *data_stride.at(0) + c * *data_stride.at(1),
*data_stride.at(1)
)
);
let mut out = col2im_naive_implementation(
@data_n_c, image_shape, block_shape, dilations, pads, strides
);
let mut i = 0;
while i != out.len() {
res.append(out.at(i));
i += 1;
};

c += 1;
};

c += 1;
n += 1;
};

n += 1;
};

let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)];
let mut i = 0;
while i != image_shape.len() {
Expand Down Expand Up @@ -289,4 +299,4 @@ fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<
};

prod
}
}
Loading
Loading