From 3094a8b57b9b9752551a60d7cee1e7f33188f26f Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Fri, 20 Dec 2024 17:54:38 +0000
Subject: [PATCH 1/7] Make `TensorBase::reshaped` return maybe-owned view

Previously `reshaped` had a footgun where it would panic if the input
view was not contiguous. Make it copy the input instead in this case.
The downside is that consumers cannot chain `reshaped` into the middle
of a sequence of view operations, because the lifetime of the result is
now shorter.
---
 rten-tensor/src/tensor.rs | 56 +++++++++++++++++++++++++++++++--------
 src/ops/conv.rs           | 15 ++++++-----
 src/ops/einsum.rs         |  4 +--
 src/ops/layout.rs         | 12 ++++-----
 src/ops/matmul.rs         |  2 +-
 5 files changed, 62 insertions(+), 27 deletions(-)

diff --git a/rten-tensor/src/tensor.rs b/rten-tensor/src/tensor.rs
index 6971944e..5988fe22 100644
--- a/rten-tensor/src/tensor.rs
+++ b/rten-tensor/src/tensor.rs
@@ -257,7 +257,13 @@ pub trait AsView: Layout {
     /// a dynamic rank if it is a slice.
     ///
     /// Panics if the tensor is not contiguous.
-    fn reshaped<S: IntoLayout>(&self, shape: S) -> TensorBase<ViewData<'_, Self::Elem>, S::Layout> {
+    fn reshaped<S: IntoLayout>(
+        &self,
+        shape: S,
+    ) -> TensorBase<CowData<'_, Self::Elem>, S::Layout>
+    where
+        Self::Elem: Clone,
+    {
         self.view().reshaped(shape)
     }
 
@@ -1454,13 +1460,24 @@ impl<'a, T, L: Clone + MutLayout> TensorBase<ViewData<'a, T>, L> {
     /// Change the shape of this tensor without copying data.
     ///
     /// See [`AsView::reshaped`].
-    pub fn reshaped<S: IntoLayout>(&self, shape: S) -> TensorBase<ViewData<'a, T>, S::Layout> {
-        TensorBase {
-            data: self.data,
-            layout: self
+    pub fn reshaped<S: IntoLayout>(&self, shape: S) -> TensorBase<CowData<'a, T>, S::Layout>
+    where
+        T: Clone,
+    {
+        if let Ok(layout) = self.layout.reshaped_for_view(shape) {
+            TensorBase {
+                data: CowData::Borrowed(self.data),
+                layout,
+            }
+        } else {
+            let layout = self
                 .layout
-                .reshaped_for_view(shape)
-                .expect("reshape failed"),
+                .reshaped_for_copy(shape)
+                .expect("invalid target shape for `reshape`");
+            TensorBase {
+                data: CowData::Owned(self.to_vec()),
+                layout,
+            }
         }
     }
 
@@ -3342,19 +3359,36 @@ mod tests {
     #[test]
     fn test_reshaped() {
         let data = &[1., 2., 3., 4., 5., 6.];
-        let tensor = NdTensorView::from_data([1, 1, 2, 1, 3], data);
+        let tensor = NdTensorView::from_data([2, 3], data);
 
-        // Reshape to static dim count
+        // Non-copying reshape to static dim count
         let reshaped = tensor.reshaped([6]);
         assert_eq!(reshaped.shape(), [6]);
+        assert_eq!(
+            reshaped.view().storage().as_ptr(),
+            tensor.view().storage().as_ptr()
+        );
+
+        // Copying reshape to static dim count
+        let reshaped = tensor.transposed().reshaped([6]);
+        assert_eq!(reshaped.shape(), [6]);
+        assert_ne!(
+            reshaped.view().storage().as_ptr(),
+            tensor.view().storage().as_ptr()
+        );
+        assert_eq!(reshaped.to_vec(), &[1., 4., 2., 5., 3., 6.]);
 
-        // Reshape to dynamic dim count
+        // Non-copying reshape to dynamic dim count
         let reshaped = tensor.reshaped([6].as_slice());
         assert_eq!(reshaped.shape(), &[6]);
+        assert_eq!(
+            reshaped.view().storage().as_ptr(),
+            tensor.view().storage().as_ptr()
+        );
     }
 
     #[test]
-    #[should_panic(expected = "reshape failed")]
+    #[should_panic(expected = "invalid target shape for `reshape`: LengthMismatch")]
     fn test_reshaped_invalid() {
         let tensor = NdTensor::arange(0, 16, None);
         tensor.reshaped([2, 2]);
diff --git a/src/ops/conv.rs b/src/ops/conv.rs
index b890d697..9cd4ce19 100644
--- a/src/ops/conv.rs
+++ b/src/ops/conv.rs
@@ -56,8 +56,8 @@ where
         gemm.gemm_uninit_bias(
             out_item.data_mut().unwrap(),
             out_row_stride,
-            GemmInputA::Unpacked(kernel_mat),
-            GemmInputB::Unpacked(in_mat),
+            GemmInputA::Unpacked(kernel_mat.view()),
+            GemmInputB::Unpacked(in_mat.view()),
             1., // alpha
             bias_vec,
         );
@@ -249,7 +249,7 @@ where
 
     // Prepack kernel if we'll be able to reuse packed weights.
     let prepacked_kernel = if in_group.size(0) > 1 {
-        Some(gemm.prepack_a_in(pool, kernel_mat).auto_return(pool))
+        Some(gemm.prepack_a_in(pool, kernel_mat.view()).auto_return(pool))
     } else {
         None
     };
@@ -281,7 +281,7 @@ where
                 out_row_stride,
                 prepacked_kernel
                     .map(GemmInputA::Packed)
-                    .unwrap_or(GemmInputA::Unpacked(kernel_mat)),
+                    .unwrap_or(GemmInputA::Unpacked(kernel_mat.view())),
                 GemmInputB::Virtual(&im2col),
                 1., // alpha
                 bias_vec,
@@ -535,7 +535,8 @@ pub fn conv_transpose(
     let mut col2im_mat =
         NdTensor::uninit_in(pool, [out_c * k_h * k_w, in_h * in_w]).auto_return(pool);
-    let kernel_mat = kernel.reshaped([k_in_c, out_c * k_h * k_w]).transposed();
+    let kernel_mat = kernel.reshaped([k_in_c, out_c * k_h * k_w]);
+    let kernel_mat = kernel_mat.transposed();
     let gemm = GemmExecutor::new();
 
     // The implementation here is the inverse of the im2col-based convolution.
@@ -548,7 +549,7 @@ pub fn conv_transpose(
             col2im_mat.data_mut().unwrap(),
             col2im_row_stride,
             GemmInputA::Unpacked(kernel_mat),
-            GemmInputB::Unpacked(input_mat),
+            GemmInputB::Unpacked(input_mat.view()),
             1., /* alpha */
         );
 
@@ -558,7 +559,7 @@ pub fn conv_transpose(
         col2im(
             &mut out_img,
-            &col2im_mat.reshaped([out_c, k_h, k_w, in_h, in_w]),
+            &col2im_mat.reshaped([out_c, k_h, k_w, in_h, in_w]).view(),
             [pad_top, pad_left, pad_right, pad_bottom],
             [stride_h, stride_w],
             bias,
diff --git a/src/ops/einsum.rs b/src/ops/einsum.rs
index 19c7cf26..369f8c37 100644
--- a/src/ops/einsum.rs
+++ b/src/ops/einsum.rs
@@ -379,8 +379,8 @@ fn einsum_step(
         .collect();
     einsum_matmul(
         pool,
-        &x,
-        &y,
+        &x.view(),
+        &y.view(),
         &term_simplified,
         &term_simplified,
         &step.output,
diff --git a/src/ops/layout.rs b/src/ops/layout.rs
index d326dc70..1d713f7d 100644
--- a/src/ops/layout.rs
+++ b/src/ops/layout.rs
@@ -45,12 +45,12 @@ pub fn depth_to_space(
     // See https://onnx.ai/onnx/operators/onnx__DepthToSpace.html#summary
     let tmp = input.to_contiguous_in(pool);
     let tmp = match mode {
-        DepthToSpaceMode::DepthColumnRow => tmp
-            .reshaped([n, block_size, block_size, new_c, h, w])
-            .permuted([0, 3, 4, 1, 5, 2]),
-        DepthToSpaceMode::ColumnRowDepth => tmp
-            .reshaped([n, new_c, block_size, block_size, h, w])
-            .permuted([0, 1, 4, 2, 5, 3]),
+        DepthToSpaceMode::DepthColumnRow => tmp.reshaped([n, block_size, block_size, new_c, h, w]),
+        DepthToSpaceMode::ColumnRowDepth => tmp.reshaped([n, new_c, block_size, block_size, h, w]),
+    };
+    let tmp = match mode {
+        DepthToSpaceMode::DepthColumnRow => tmp.permuted([0, 3, 4, 1, 5, 2]),
+        DepthToSpaceMode::ColumnRowDepth => tmp.permuted([0, 1, 4, 2, 5, 3]),
     };
     let mut tmp = tmp.to_tensor_in(pool).into_dyn();
     tmp.reshape(&new_shape);
diff --git a/src/ops/matmul.rs b/src/ops/matmul.rs
index 729390a4..7005009f 100644
--- a/src/ops/matmul.rs
+++ b/src/ops/matmul.rs
@@ -188,7 +188,7 @@ where
         // nb. We assume `a` is likely already contiguous, so this will be cheap.
         let a_contig = a.to_contiguous_in(pool).auto_return(pool);
         let a_matrix = a_contig.reshaped([num_a_matrices * a_rows, a_cols].as_slice());
-        let mut output = matmul_impl(pool, a_matrix, b.clone(), strategy, bias)?;
+        let mut output = matmul_impl(pool, a_matrix.view(), b.clone(), strategy, bias)?;
         output.reshape(out_shape);
         return Ok(output);
     }

From d02348ab2be8520d0f72970b2de90b2f46e3cdbb Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Fri, 20 Dec 2024 17:55:02 +0000
Subject: [PATCH 2/7] Add variant of `TensorBase::reshaped` that takes an
 allocator

---
 rten-tensor/src/tensor.rs | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/rten-tensor/src/tensor.rs b/rten-tensor/src/tensor.rs
index 5988fe22..98e22f44 100644
--- a/rten-tensor/src/tensor.rs
+++ b/rten-tensor/src/tensor.rs
@@ -267,6 +267,19 @@ pub trait AsView: Layout {
         self.view().reshaped(shape)
     }
 
+    /// A variant of [`reshaped`](Self::reshaped) that allows specifying the
+    /// allocator to use if a copy is needed.
+    fn reshaped_in<A: Alloc, S: IntoLayout>(
+        &self,
+        alloc: A,
+        shape: S,
+    ) -> TensorBase<CowData<'_, Self::Elem>, S::Layout>
+    where
+        Self::Elem: Clone,
+    {
+        self.view().reshaped_in(alloc, shape)
+    }
+
     /// Reverse the order of dimensions in this tensor.
     fn transpose(&mut self);
 
@@ -1457,10 +1470,24 @@ impl<'a, T, L: Clone + MutLayout> TensorBase<ViewData<'a, T>, L> {
         }
     }
 
-    /// Change the shape of this tensor without copying data.
+    /// Reshape this tensor, copying the data only if necessary.
     ///
     /// See [`AsView::reshaped`].
     pub fn reshaped<S: IntoLayout>(&self, shape: S) -> TensorBase<CowData<'a, T>, S::Layout>
+    where
+        T: Clone,
+    {
+        self.reshaped_in(GlobalAlloc::new(), shape)
+    }
+
+    /// Reshape this tensor, copying the data only if necessary.
+    ///
+    /// See [`AsView::reshaped_in`].
+    pub fn reshaped_in<A: Alloc, S: IntoLayout>(
+        &self,
+        alloc: A,
+        shape: S,
+    ) -> TensorBase<CowData<'a, T>, S::Layout>
     where
         T: Clone,
     {
@@ -1475,7 +1502,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase<ViewData<'a, T>, L> {
             .reshaped_for_copy(shape)
             .expect("invalid target shape for `reshape`");
         TensorBase {
-            data: CowData::Owned(self.to_vec()),
+            data: CowData::Owned(self.to_vec_in(alloc)),
             layout,
         }
     }

From 71dbdfa381d0dfc4a0601a80d2a31723a3601dd8 Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Fri, 20 Dec 2024 17:55:12 +0000
Subject: [PATCH 3/7] Use `TensorBase::reshaped_in` in rten ops

Use `reshaped_in` to replace `to_contiguous_in` followed by `reshaped`
calls.
---
 src/ops/conv.rs | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/src/ops/conv.rs b/src/ops/conv.rs
index 9cd4ce19..7ea8b50d 100644
--- a/src/ops/conv.rs
+++ b/src/ops/conv.rs
@@ -34,11 +34,7 @@ where
     let [out_c, in_c, _, _]: [usize; 4] = kernel.shape();
     let mut output = NdTensor::uninit_in(pool, [batch, out_c, in_h * in_w]);
 
-    // Get input and kernel as contiguous tensors so we can create reshaped
-    // views.
-    let input = input.to_contiguous_in(pool).auto_return(pool);
-    let kernel = kernel.to_contiguous_in(pool).auto_return(pool);
-    let kernel_mat = kernel.reshaped([out_c, in_c]);
+    let kernel_mat = kernel.reshaped_in(pool, [out_c, in_c]).auto_return(pool);
 
     // Bias must be contiguous for use with `gemm_bias`.
     let bias = bias.as_ref().map(|b| b.to_contiguous());
@@ -51,7 +47,10 @@ where
         let mut out_item = output.slice_mut([n]);
         let out_row_stride = out_item.stride(0);
 
-        let in_mat = input.slice([n]).reshaped([in_c, in_h * in_w]);
+        let in_mat = input
+            .slice([n])
+            .reshaped_in(pool, [in_c, in_h * in_w])
+            .auto_return(pool);
 
         gemm.gemm_uninit_bias(
             out_item.data_mut().unwrap(),
@@ -242,10 +241,10 @@ where
     let in_group = input.slice((.., in_chan_start..in_chan_end));
     let mut out_group = output.slice_mut((.., out_chans.clone()));
 
-    let kernel = kernel.to_contiguous_in(pool);
-    let kernel_mat = kernel
-        .slice([out_chans.clone()])
-        .reshaped([out_channels_per_group, in_channels_per_group * k_h * k_w]);
+    let kernel_mat = kernel.slice([out_chans.clone()]).reshaped_in(
+        pool,
+        [out_channels_per_group, in_channels_per_group * k_h * k_w],
+    );
 
     // Prepack kernel if we'll be able to reuse packed weights.
     let prepacked_kernel = if in_group.size(0) > 1 {
@@ -529,20 +528,21 @@ pub fn conv_transpose(
 
     let mut output = NdTensor::uninit_in(pool, [batch, out_c, out_h, out_w]);
 
-    // Ensure input and kernel are contiguous to support reshaping.
-    let input = input.to_contiguous_in(pool).auto_return(pool);
-    let kernel = kernel.to_contiguous_in(pool).auto_return(pool);
-
     let mut col2im_mat =
         NdTensor::uninit_in(pool, [out_c * k_h * k_w, in_h * in_w]).auto_return(pool);
-    let kernel_mat = kernel.reshaped([k_in_c, out_c * k_h * k_w]);
+    let kernel_mat = kernel
+        .reshaped_in(pool, [k_in_c, out_c * k_h * k_w])
+        .auto_return(pool);
     let kernel_mat = kernel_mat.transposed();
     let gemm = GemmExecutor::new();
 
     // The implementation here is the inverse of the im2col-based convolution.
     let mut n_init = 0;
     for n in 0..batch {
-        let input_mat = input.slice([n]).reshaped([in_c, in_h * in_w]);
+        let input_mat = input
+            .slice([n])
+            .reshaped_in(pool, [in_c, in_h * in_w])
+            .auto_return(pool);
 
         let col2im_row_stride = col2im_mat.stride(0);
         gemm.gemm_uninit(

From 72fb8d0b57fd4e8a6da82d7ada1ae1db963915d0 Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Fri, 20 Dec 2024 17:55:19 +0000
Subject: [PATCH 4/7] Replace `reshape` with `reshaped_in` in `conv_transpose`

This fixes a hazard where ConvTranspose could panic if passed
non-contiguous inputs with one spatial dim.
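
For example (an illustrative sketch, not a test from this series; the
shapes and values are made up, and `pool` stands for the `TensorPool`
that the operator already receives):

    // An NCW input whose transpose is non-contiguous.
    let input = Tensor::<f32>::from_data(&[1, 2, 2], vec![1., 2., 3., 4.]);
    let input = input.transposed(); // non-contiguous view
    // Before: `input.clone().reshape(&[n, c, 1, w])` panicked on this view,
    // because `reshape` on views asserts contiguity.
    // After: `reshaped_in` falls back to copying into `pool` when the view
    // is not contiguous, so no panic occurs.
    let input_2d = input.reshaped_in(pool, [2, 2, 1, 1].as_slice());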
---
 src/ops/conv.rs | 20 ++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/src/ops/conv.rs b/src/ops/conv.rs
index 7ea8b50d..9fa8e46d 100644
--- a/src/ops/conv.rs
+++ b/src/ops/conv.rs
@@ -475,11 +475,12 @@ pub fn conv_transpose(
     if let &[n, c, w] = input.shape() {
         let [out_c, k_in_c, k_w] = static_dims!(kernel, 3, "OCW")?.shape();
 
-        let mut input_2d = input.clone();
-        input_2d.reshape(&[n, c, 1, w]);
-
-        let mut kernel_2d = kernel.clone();
-        kernel_2d.reshape(&[out_c, k_in_c, 1, k_w]);
+        let input_2d = input
+            .reshaped_in(pool, [n, c, 1, w].as_slice())
+            .auto_return(pool);
+        let kernel_2d = kernel
+            .reshaped_in(pool, [out_c, k_in_c, 1, k_w].as_slice())
+            .auto_return(pool);
 
         let padding_2d = padding.expand_1d_to_2d()?;
 
@@ -490,7 +491,14 @@ pub fn conv_transpose(
             }
         };
 
-        let result_2d = conv_transpose(pool, input_2d, kernel_2d, bias, padding_2d, &strides_2d);
+        let result_2d = conv_transpose(
+            pool,
+            input_2d.view(),
+            kernel_2d.view(),
+            bias,
+            padding_2d,
+            &strides_2d,
+        );
 
         return result_2d.map(|mut t| {
             let [n, c, _h, w]: [usize; 4] = t.shape().try_into().expect("expected 4D output");

From 3d4a179b375232c28881914efd166a07bb1e7f1f Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Fri, 20 Dec 2024 17:55:29 +0000
Subject: [PATCH 5/7] Remove `reshape` method for tensor views

This had a footgun where it would panic if the view was not contiguous.
Also the implementation failed to check that the new shape had the same
number of elements as the old shape (!). Callers should use the
`reshaped` method instead.
---
 rten-tensor/src/tensor.rs | 37 -------------------------------------
 1 file changed, 37 deletions(-)

diff --git a/rten-tensor/src/tensor.rs b/rten-tensor/src/tensor.rs
index 98e22f44..738b4d1f 100644
--- a/rten-tensor/src/tensor.rs
+++ b/rten-tensor/src/tensor.rs
@@ -1923,19 +1923,6 @@ impl<T> TensorBase<Vec<T>, DynLayout> {
     }
 }
 
-impl<T> TensorBase<ViewData<'_, T>, DynLayout> {
-    /// Reshape this view.
-    ///
-    /// Panics if the view is not contiguous.
-    pub fn reshape(&mut self, shape: &[usize])
-    where
-        T: Clone,
-    {
-        assert!(self.is_contiguous(), "can only reshape contiguous views");
-        self.layout = DynLayout::from_shape(shape);
-    }
-}
-
 impl<'a, T, L: MutLayout> TensorBase<ViewMutData<'a, T>, L> {
     /// Divide this tensor into two mutable views along a given axis.
     ///
@@ -1974,19 +1961,6 @@ impl<'a, T, L: MutLayout> TensorBase<ViewMutData<'a, T>, L> {
     }
 }
 
-impl<T> TensorBase<ViewMutData<'_, T>, DynLayout> {
-    /// Reshape this view.
-    ///
-    /// Panics if the view is not contiguous.
-    pub fn reshape(&mut self, shape: &[usize])
-    where
-        T: Clone,
-    {
-        assert!(self.is_contiguous(), "can only reshape contiguous views");
-        self.layout = DynLayout::from_shape(shape);
-    }
-}
-
 impl<T, L: MutLayout> FromIterator<T> for TensorBase<Vec<T>, L>
 where
     [usize; 1]: AsIndex<L>,
@@ -3358,22 +3332,11 @@ mod tests {
 
     #[test]
     fn test_reshape() {
-        // Owned tensor
         let mut tensor = Tensor::<f32>::from_data(&[2, 2], vec![1., 2., 3., 4.]);
         tensor.transpose();
         tensor.reshape(&[4]);
         assert_eq!(tensor.shape(), &[4]);
         assert_eq!(tensor.to_vec(), &[1., 3., 2., 4.]);
-
-        // View
-        let mut view = tensor.view();
-        view.reshape(&[2, 2]);
-        assert_eq!(view.shape(), &[2, 2]);
-
-        // Mut view
-        let mut view_mut = tensor.view_mut();
-        view_mut.reshape(&[2, 2]);
-        assert_eq!(view_mut.shape(), &[2, 2]);
     }
 
     #[test]

From 6d1d1bd59fc469ef1693fb4b7aab40776958ac64 Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Fri, 20 Dec 2024 17:56:16 +0000
Subject: [PATCH 6/7] Make `reshaped_mut` return error if view is not
 contiguous

Unlike `TensorBase::reshaped` we can't fall back to copying if the view
is not contiguous, because this method is often used to obtain a mutable
slice into a larger tensor to write to, and it would be incorrect to
write into a copy. Instead, make this method return a `Result` so that
callers are made aware that they need to handle this possibility.
---
 rten-tensor/src/tensor.rs | 20 ++++++++++----------
 src/ops/conv.rs           |  4 +++-
 src/ops/layout.rs         |  5 ++++-
 3 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/rten-tensor/src/tensor.rs b/rten-tensor/src/tensor.rs
index 738b4d1f..00b8178a 100644
--- a/rten-tensor/src/tensor.rs
+++ b/rten-tensor/src/tensor.rs
@@ -6,7 +6,7 @@ use std::ops::{Index, IndexMut, Range};
 use crate::copy::{
     copy_into, copy_into_slice, copy_into_uninit, copy_range_into_slice, map_into_slice,
 };
-use crate::errors::{DimensionError, ExpandError, FromDataError, SliceError};
+use crate::errors::{DimensionError, ExpandError, FromDataError, ReshapeError, SliceError};
 use crate::iterators::{
     for_each_mut, AxisChunks, AxisChunksMut, AxisIter, AxisIterMut, InnerIter, InnerIterDyn,
     InnerIterDynMut, InnerIterMut, Iter, IterMut, Lanes, LanesMut, MutViewRef, ViewRef,
@@ -769,18 +769,18 @@ impl<S: StorageMut, L: MutLayout> TensorBase<S, L> {
 
     /// Change the layout of the tensor without moving any data.
     ///
-    /// See [`AsView::reshaped`].
+    /// This will return an error if the view is not contiguous.
+    ///
+    /// See also [`AsView::reshaped`].
     pub fn reshaped_mut<SH: IntoLayout>(
         &mut self,
         shape: SH,
-    ) -> TensorBase<ViewMutData<'_, S::Elem>, SH::Layout> {
-        TensorBase {
-            layout: self
-                .layout
-                .reshaped_for_view(shape)
-                .expect("reshape failed"),
+    ) -> Result<TensorBase<ViewMutData<'_, S::Elem>, SH::Layout>, ReshapeError> {
+        let layout = self.layout.reshaped_for_view(shape)?;
+        Ok(TensorBase {
+            layout,
             data: self.data.view_mut(),
-        }
+        })
     }
 
     /// Slice this tensor along a given axis.
@@ -3389,7 +3389,7 @@ mod tests {
         let data = vec![1., 2., 3., 4., 5., 6.];
         let mut tensor = NdTensor::from_data([1, 1, 2, 1, 3], data);
 
-        let mut reshaped = tensor.reshaped_mut([6]);
+        let mut reshaped = tensor.reshaped_mut([6]).unwrap();
         reshaped[[0]] = 0.;
         reshaped[[5]] = 0.;
 
diff --git a/src/ops/conv.rs b/src/ops/conv.rs
index 9fa8e46d..dfd4c5fd 100644
--- a/src/ops/conv.rs
+++ b/src/ops/conv.rs
@@ -259,7 +259,9 @@ where
             .zip(in_group.axis_iter(0))
             .par_bridge()
             .for_each(|(mut out_item, in_item)| {
-                let mut out_mat = out_item.reshaped_mut([out_channels_per_group, out_h * out_w]);
+                let mut out_mat = out_item
+                    .reshaped_mut([out_channels_per_group, out_h * out_w])
+                    .unwrap();
                 let out_row_stride = out_mat.stride(0);
 
                 let im2col = VirtualIm2Col::new(
diff --git a/src/ops/layout.rs b/src/ops/layout.rs
index 1d713f7d..d8946dcf 100644
--- a/src/ops/layout.rs
+++ b/src/ops/layout.rs
@@ -1372,7 +1372,10 @@ mod tests {
 
         let reference_transpose_stats = run_bench(100, None, || {
             let transposed = tensor.permuted(perm);
-            reference_transpose_into(transposed.view(), dest.reshaped_mut(transposed.shape()));
+            reference_transpose_into(
+                transposed.view(),
+                dest.reshaped_mut(transposed.shape()).unwrap(),
+            );
         });
 
         let transpose_stats = run_bench(100, None, || {

From d291e1ec714a34c50c877cbff6d3dddc410758d7 Mon Sep 17 00:00:00 2001
From: Robert Knight
Date: Fri, 20 Dec 2024 17:56:29 +0000
Subject: [PATCH 7/7] Revise docs for `TensorBase::reshaped`

---
 rten-tensor/src/tensor.rs | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/rten-tensor/src/tensor.rs b/rten-tensor/src/tensor.rs
index 00b8178a..e3030d7f 100644
--- a/rten-tensor/src/tensor.rs
+++ b/rten-tensor/src/tensor.rs
@@ -249,14 +249,20 @@ pub trait AsView: Layout {
         self.view().permuted(order)
     }
 
-    /// Return a view with a given shape, without copying any data. This
-    /// requires that the tensor is contiguous.
+    /// Return either a view or a copy of `self` with the given shape.
     ///
     /// The new shape must have the same number of elements as the current
    /// shape. The result will have a static rank if `shape` is an array or
     /// a dynamic rank if it is a slice.
     ///
-    /// Panics if the tensor is not contiguous.
+    /// If `self` is contiguous this will return a view, as changing the shape
+    /// can be done without moving data. Otherwise it will copy elements into
+    /// a new tensor.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the number of elements in the new shape does not match the
+    /// number of elements in the current shape.
     fn reshaped<S: IntoLayout>(
         &self,
         shape: S,
@@ -267,7 +273,7 @@ pub trait AsView: Layout {
         self.view().reshaped(shape)
     }
 
-    /// A variant of [`reshaped`](Self::reshaped) that allows specifying the
+    /// A variant of [`reshaped`](AsView::reshaped) that allows specifying the
     /// allocator to use if a copy is needed.
     fn reshaped_in<A: Alloc, S: IntoLayout>(
         &self,
@@ -1470,7 +1476,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase<ViewData<'a, T>, L> {
         }
     }
 
-    /// Reshape this tensor, copying the data only if necessary.
+    /// Return a view or owned tensor that has the given shape.
     ///
     /// See [`AsView::reshaped`].
     pub fn reshaped<S: IntoLayout>(&self, shape: S) -> TensorBase<CowData<'a, T>, S::Layout>
@@ -1480,9 +1486,7 @@ impl<'a, T, L: Clone + MutLayout> TensorBase<ViewData<'a, T>, L> {
         self.reshaped_in(GlobalAlloc::new(), shape)
     }
 
-    /// Reshape this tensor, copying the data only if necessary.
-    ///
-    /// See [`AsView::reshaped_in`].
+    /// Variant of [`reshaped`](Self::reshaped) that takes an allocator.
    pub fn reshaped_in<A: Alloc, S: IntoLayout>(
         &self,
         alloc: A,
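
Taken together, the series changes `reshaped` from "view or panic" to
"view or copy". A minimal sketch of the resulting contract, using only
the APIs exercised by `test_reshaped` above (the values mirror that
test):

    let data = &[1., 2., 3., 4., 5., 6.];
    let tensor = NdTensorView::from_data([2, 3], data);

    // Contiguous input: `reshaped` borrows the existing storage, no copy.
    let view = tensor.reshaped([6]);
    assert_eq!(view.shape(), [6]);

    // Non-contiguous input: `reshaped` now copies instead of panicking.
    let copied = tensor.transposed().reshaped([6]);
    assert_eq!(copied.to_vec(), &[1., 4., 2., 5., 3., 6.]);

    // Mutable reshapes cannot fall back to copying, so `reshaped_mut`
    // returns a `Result` that callers must handle, e.g. with `?` or
    // `.unwrap()` as in the ops updated by patch 6.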