Fix symint related functionalization ops (#1289)
* Fix symint related functionalization ops

* Remove zeros xfail from LTC tests
antoniojkim authored Aug 26, 2022
1 parent 0e3ddba commit 8e880a2
Showing 4 changed files with 32 additions and 24 deletions.
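The C++ changes below hinge on two size conversions: c10::SymIntArrayRef::fromIntArrayRef wraps concrete int64_t sizes as symbolic ones, and c10::asIntArrayRefSlow materializes symbolic sizes back into plain integers. A minimal standalone sketch of the round trip (assuming a PyTorch build from this era where both helpers exist; empty_from_concrete is a hypothetical name used only for illustration):

// Sketch only, not part of this commit.
#include <ATen/ATen.h>
#include <c10/core/SymIntArrayRef.h>

at::Tensor empty_from_concrete(at::IntArrayRef size) {
  // Wrap concrete sizes as SymInts, as the updated empty_strided and
  // _unsafe_view do before calling SymInt-typed overloads.
  c10::SymIntArrayRef sym_size = c10::SymIntArrayRef::fromIntArrayRef(size);
  // Materialize the SymInts back into plain ints, as the updated
  // LazyNativeFunctions::empty does; this assumes no size is still symbolic,
  // which is why the diff leaves a "TODO: support this directly" there.
  auto concrete_size = c10::asIntArrayRefSlow(sym_size);
  return at::empty(concrete_size);
}

Rather than adding a second overload, the commit keeps a single empty implementation: callers holding concrete sizes wrap them with fromIntArrayRef, and empty itself falls back to concrete sizes via asIntArrayRefSlow.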
27 changes: 24 additions & 3 deletions build_tools/autogen_ltc_backend.py
@@ -24,8 +24,9 @@
 from torchgen.gen_backend_stubs import parse_backend_yaml

 TORCH_DIR = Path(importlib.util.find_spec("torch").origin).resolve().parent.parent
-if TORCH_DIR.joinpath("torch", "include").is_dir():
-    TORCH_DIR = TORCH_DIR.joinpath("torch", "include")
+TORCH_INCLUDE_DIR = TORCH_DIR.joinpath("torch", "include")
+if not TORCH_INCLUDE_DIR.is_dir():
+    TORCH_INCLUDE_DIR = TORCH_DIR
 TORCHGEN_DIR = Path(torchgen.__path__[0]).resolve()
 TORCH_MLIR_DIR = Path(__file__).resolve().parent.parent

@@ -167,6 +168,9 @@ def generate_native_functions(self):
         ts_native_yaml = None
         if ts_native_yaml_path.exists():
             ts_native_yaml = yaml.load(ts_native_yaml_path.read_text(), yaml.CLoader)
+        else:
+            logging.warning(f"Could not find `ts_native_functions.yaml` at {ts_native_yaml_path}")
+

         parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
         self.native_functions = parsed_yaml.native_functions
@@ -290,6 +294,7 @@ def get_opnames(ops):

         if ts_native_yaml:
             ts_full_codegen = set(ts_native_yaml["full_codegen"])
+            ts_supported = set(ts_native_yaml["supported"])
             mlir_full_codegen = set(self.ops)

             if ts_full_codegen - mlir_full_codegen:
@@ -308,6 +313,22 @@ def get_opnames(ops):
                     )
                 )

+            if ts_supported - supported:
+                logging.debug(
+                    "Ops supported by the TorchScript backend "
+                    "but not by the Torch-MLIR backend:\n {}".format(
+                        "\n ".join(sorted(ts_supported - supported))
+                    )
+                )
+
+            if supported - ts_supported:
+                logging.debug(
+                    "Ops supported by the Torch-MLIR backend "
+                    "but not by the TorchScript backend:\n {}".format(
+                        "\n ".join(sorted(supported - ts_supported))
+                    )
+                )
+
     def generate_shape_inference(self):
         parsed_backend_yaml = parse_backend_yaml(
             self.source_yaml,
@@ -367,7 +388,7 @@ def extract_signatures(text):
         )
         assert len(shape_inference_decls) > 0
         upstream_shape_inference_decls = extract_signatures(
-            TORCH_DIR.joinpath(
+            TORCH_INCLUDE_DIR.joinpath(
                 "torch", "csrc", "lazy", "core", "shape_inference.h"
             ).read_text()
         )
3 changes: 1 addition & 2 deletions build_tools/autogen_ltc_backend.yaml
@@ -19,6 +19,7 @@ blacklist:
 - new_empty
 - rsub
 - slice.Tensor # Disabled in favour of slice_copy.Tensor
+- zeros

 # Disabled in favour of functionalized alternatives
 - _reshape_alias
@@ -59,14 +60,12 @@ supported:
 # but their implementations call view operators (which we need to functionalize away).
 - block_diag
 - new_empty_strided
-- narrow_copy
 - pixel_shuffle
 - pixel_unshuffle
 - select_backward
 - slice_backward
 - diagonal_backward
 - _trilinear
-- linalg_inv_ex
 - linalg_pinv.atol_rtol_tensor
 - logsumexp.out

6 changes: 0 additions & 6 deletions e2e_testing/torchscript/xfail_sets.py
@@ -435,12 +435,6 @@
     "NewOnesModuleFloat3D_basic",
     "NewOnesModuleInt2D_basic",
     "NewOnesModuleInt3D_basic",
-    "NewZerosModuleDefaultDtype_basic",
-    "NewZerosModuleFalsePinMemory_basic",
-    "NewZerosModuleFloat2D_basic",
-    "NewZerosModuleFloat3D_basic",
-    "NewZerosModuleInt2D_basic",
-    "NewZerosModuleInt3D_basic",
     "OnesLikeModule_defaultDtype",
     "OnesLikeModule_falsePinMemory",
     "OnesLikeModule_float",
20 changes: 7 additions & 13 deletions python/torch_mlir/csrc/base_lazy_backend/mlir_native_functions.cpp
@@ -302,10 +302,12 @@ at::Tensor LazyNativeFunctions::_to_copy(
 };

 at::Tensor LazyNativeFunctions::empty(
-    at::IntArrayRef size, c10::optional<at::ScalarType> dtype,
+    at::SymIntArrayRef sym_size, c10::optional<at::ScalarType> dtype,
     c10::optional<at::Layout> layout, c10::optional<at::Device> device,
     c10::optional<bool> pin_memory,
     c10::optional<at::MemoryFormat> memory_format) {
+  // TODO: support this directly
+  auto size = c10::asIntArrayRefSlow(sym_size);
   const auto device_type = torch::lazy::getBackend()->EagerFallbackDeviceType();
   at::TensorOptions options = at::TensorOptions()
                                   .device(c10::Device(device_type))
@@ -331,7 +333,9 @@ at::Tensor LazyNativeFunctions::empty_strided(
     c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout,
     c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
   TORCH_LAZY_FN_COUNTER("lazy::");
-  at::Tensor t = empty(size, dtype, layout, device, pin_memory, c10::nullopt);
+  at::Tensor t = empty(
+      c10::SymIntArrayRef::fromIntArrayRef(size),
+      dtype, layout, device, pin_memory, c10::nullopt);
   return t.as_strided(size, stride, /*storage_offset=*/0);
 }
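Any other call site in this file that still holds concrete sizes would wrap them the same way; a hypothetical fragment in the style of the overrides here (assuming mlir_native_functions.cpp's existing includes, with the argument order matching the empty signature above):

// Hypothetical helper, not in the commit: forward concrete sizes to the
// now SymInt-typed empty, mirroring what empty_strided does above.
at::Tensor empty_with_dtype(
    at::IntArrayRef size, c10::optional<at::ScalarType> dtype) {
  return LazyNativeFunctions::empty(
      c10::SymIntArrayRef::fromIntArrayRef(size),
      dtype, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
}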

@@ -350,7 +354,7 @@ LazyNativeFunctions::fill_(at::Tensor& self, const at::Scalar& value) {
 at::Tensor LazyNativeFunctions::_unsafe_view(
     const at::Tensor& self, at::IntArrayRef size) {
   TORCH_LAZY_FN_COUNTER("lazy::");
-  return LazyNativeFunctions::view_copy(self, size);
+  return LazyNativeFunctions::view_copy(self, c10::SymIntArrayRef::fromIntArrayRef(size));
 }

 // This is needed by the torch.tensor constructor.
@@ -385,11 +389,6 @@ at::Tensor LazyNativeFunctions::new_empty_strided(
       self, size, stride, dtype, layout, device, pin_memory);
 }

-at::Tensor LazyNativeFunctions::narrow_copy(
-    const at::Tensor& self, int64_t dim, int64_t start, int64_t length) {
-  return at::functionalization::functionalize_aten_op<ATEN_OP(
-      narrow_copy)>::call(self, dim, start, length);
-}
 at::Tensor LazyNativeFunctions::pixel_shuffle(
     const at::Tensor& self, int64_t upscale_factor) {
   return at::functionalization::functionalize_aten_op<ATEN_OP(
@@ -425,11 +424,6 @@ at::Tensor LazyNativeFunctions::_trilinear(
   return at::functionalization::functionalize_aten_op<ATEN_OP(_trilinear)>::
       call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
 }
-::std::tuple<at::Tensor, at::Tensor>
-LazyNativeFunctions::linalg_inv_ex(const at::Tensor& self, bool check_errors) {
-  return at::functionalization::functionalize_aten_op<ATEN_OP(
-      linalg_inv_ex)>::call(self, check_errors);
-}
 at::Tensor LazyNativeFunctions::linalg_pinv(
     const at::Tensor& self, const c10::optional<at::Tensor>& atol,
     const c10::optional<at::Tensor>& rtol, bool hermitian) {
