Clean up some parts of the test suite
The purpose of the test suite is to accelerate development of the
compiler. However, we had various tests that were not expected to
pass, exercised no in-progress work, and had nobody actively working
on them. Such tests only add clutter to the test suite and slow down
development of the compiler.
silvasean committed Nov 21, 2022
1 parent a9fb0c5 commit 22307a1
Showing 6 changed files with 4 additions and 296 deletions.
e2e_testing/xfail_sets.py (4 additions, 7 deletions)
@@ -14,10 +14,10 @@

 REFBACKEND_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS

-EAGER_MODE_XFAIL_SET = {
-    # RefBackend fails
-    "TableBatchEmbeddingModule_basic",
-    "QuantizedMLP_basic",
+EAGER_MODE_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
+    # RefBackend fails for some reason.
+    # These tests pass in the regular RefBackend flow, so it's unclear
+    # why they fail here.
     "Matmul_vecmat",
     "BatchMlpLayerModule_basic",
     "UpSampleNearest2dDynamicFactor_basic",
@@ -619,7 +619,6 @@
     "StdUnbiasedModule_basic",
     "SubFloatModule_basic",
     "SubIntModule_basic",
-    "TableBatchEmbeddingModule_basic",
     "TensorsConcatNegativeDimModule_basic",
     "TensorToBoolZeroRank_basic",
     "TensorToBool_basic",
@@ -645,10 +644,8 @@
     "Fill_TensorFloat32WithInt64_basic",
     "UpSampleNearest2dBackwardVec_basic",
     "UpSampleNearest2dBackwardOutputSizeNone_basic",
-    "ConvolutionBackwardModule1D_basic",
     "ConvolutionBackwardModule2D_basic",
     "ConvolutionBackwardModule2DPadded_basic",
-    "ConvolutionBackwardModule3D_basic",
     "VarMeanCorrectionModule_basic",
     "VarMeanCorrectionNoneModule_basic"
 }
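A note on the mechanics here: the xfail sets are plain Python sets of test names, so the new EAGER_MODE_XFAIL_SET is built with set union and automatically inherits every common lowering xfail. A minimal sketch of how a harness can consume such a set (the report helper below is illustrative, not the actual torch-mlir runner API):

# Illustrative sketch only: `report` is a hypothetical helper, not the
# torch-mlir harness API. It shows how membership in an xfail set
# typically gates a test's outcome.
COMMON_TORCH_MLIR_LOWERING_XFAILS = {"QuantizedMLP_basic"}
EAGER_MODE_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {"Matmul_vecmat"}

def report(test_name: str, passed: bool, xfail_set: set) -> str:
    expected_failure = test_name in xfail_set
    if passed and expected_failure:
        return "UNEXPECTED PASS"  # stale xfail entry; should be removed
    if not passed and expected_failure:
        return "XFAIL"  # known failure; does not fail the suite
    return "PASS" if passed else "FAIL"

print(report("Matmul_vecmat", False, EAGER_MODE_XFAIL_SET))  # XFAIL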
python/torch_mlir_e2e_test/test_suite/__init__.py (0 additions, 10 deletions)
@@ -8,15 +8,6 @@
 # to the backend contract.
 COMMON_TORCH_MLIR_LOWERING_XFAILS = {
     "QuantizedMLP_basic",
-    "TableBatchEmbeddingModule_basic",
-    "Convolution3DModule_basic",
-    "Convolution1DModule_basic",
-    "Conv_Transpose3dModule_basic",
-    "Conv_Transpose1dModule_basic",
-    "MaxPool2dWith3dInputModule_basic",
-    "MaxPool2dWithIndicesWith3dInputModule_basic",
-    "ConvolutionBackwardModule1D_basic",
-    "ConvolutionBackwardModule3D_basic",
 }

 def register_all_tests():
@@ -47,7 +38,6 @@ def register_all_tests():
     from . import constant_alloc
     from . import threshold
     from . import histogram_binning_calibration
-    from . import table_batch_embedding
     from . import rng
     from . import cast
     from . import index_put
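The import-only body of register_all_tests() works because each test module registers its tests as an import side effect: the @register_test_case decorator seen in the diffs below appends each test to a global registry. A simplified sketch of that pattern (inferred from how the decorator is used in these files; the real registry records more metadata):

# Simplified sketch of import-time registration, assumed from the usage
# of @register_test_case below; details of the real decorator differ.
GLOBAL_TEST_REGISTRY = []

def register_test_case(module_factory):
    def decorator(test_func):
        GLOBAL_TEST_REGISTRY.append((test_func.__name__, module_factory, test_func))
        return test_func
    return decorator

# Importing a module that contains a decorated test is enough to register
# it, which is why removing "from . import table_batch_embedding" above
# fully retires that file's tests.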
python/torch_mlir_e2e_test/test_suite/backprop.py (0 additions, 69 deletions)
@@ -58,41 +58,6 @@ def TanhBackward_basic(module, tu: TestUtils):

 # ==============================================================================

-
-class ConvolutionBackwardModule1D(torch.nn.Module):
-
-    def __init__(self):
-        super().__init__()
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1], torch.float32, True),
-        ([-1, -1, -1], torch.float32, True),
-        ([-1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, grad_out, input_vec, weight):
-        return torch.ops.aten.convolution_backward(
-            grad_out,
-            input_vec,
-            weight,
-            bias_sizes=None,
-            stride=[1],
-            padding=[0],
-            dilation=[1],
-            transposed=False,
-            output_padding=[0],
-            groups=1,
-            output_mask=[True, True, True])
-
-
-@register_test_case(module_factory=lambda: ConvolutionBackwardModule1D())
-def ConvolutionBackwardModule1D_basic(module, tu: TestUtils):
-    with torch.backends.mkldnn.flags(enabled=False):
-        module.forward(tu.rand(3, 3, 3), tu.rand(3, 3, 3),
-                       tu.rand(3, 3, 1))
-
-
 class ConvolutionBackwardModule2D(torch.nn.Module):

     def __init__(self):
@@ -161,40 +126,6 @@ def ConvolutionBackwardModule2DPadded_basic(module, tu: TestUtils):
                    tu.rand(2, 2, 3, 3))


-class ConvolutionBackwardModule3D(torch.nn.Module):
-
-    def __init__(self):
-        super().__init__()
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1, -1, -1], torch.float32, True),
-        ([-1, -1, -1, -1, -1], torch.float32, True),
-        ([-1, -1, -1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, grad_out, input_vec, weight):
-        return torch.ops.aten.convolution_backward(
-            grad_out,
-            input_vec,
-            weight,
-            bias_sizes=None,
-            stride=[1, 1, 1],
-            padding=[0],
-            dilation=[1, 1, 1],
-            transposed=False,
-            output_padding=[0],
-            groups=1,
-            output_mask=[True, True, True])
-
-
-@register_test_case(module_factory=lambda: ConvolutionBackwardModule3D())
-def ConvolutionBackwardModule3D_basic(module, tu: TestUtils):
-    with torch.backends.mkldnn.flags(enabled=False):
-        module.forward(tu.rand(3, 3, 3, 3, 3), tu.rand(3, 3, 3, 3, 3),
-                       tu.rand(3, 3, 1, 1, 1))
-
-
 # ==============================================================================
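For anyone reviving these backward tests later, the op under test is torch.ops.aten.convolution_backward, which can also be exercised in eager PyTorch. This sketch mirrors the deleted 1-D test's shapes and arguments; the expected-shape comments are hand-derived and worth re-checking:

import torch

# Eager-mode probe of the op the deleted tests covered, using the
# shapes from ConvolutionBackwardModule1D_basic above.
grad_out = torch.rand(3, 3, 3)
input_vec = torch.rand(3, 3, 3)
weight = torch.rand(3, 3, 1)

with torch.backends.mkldnn.flags(enabled=False):  # as the deleted test did
    grad_input, grad_weight, grad_bias = torch.ops.aten.convolution_backward(
        grad_out, input_vec, weight,
        bias_sizes=None, stride=[1], padding=[0], dilation=[1],
        transposed=False, output_padding=[0], groups=1,
        output_mask=[True, True, True])

# Expected (hand-derived): grad_input (3, 3, 3), grad_weight (3, 3, 1),
# and grad_bias the per-output-channel sum of grad_out.
print(grad_input.shape, grad_weight.shape, grad_bias.shape)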
python/torch_mlir_e2e_test/test_suite/conv.py (0 additions, 103 deletions)
@@ -142,32 +142,6 @@ def Conv2dWithPaddingDilationStrideStaticModule_basic(module, tu: TestUtils):

 # ==============================================================================

-class Convolution1DModule(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1], torch.float32, True),
-        ([-1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, inputVec, weight):
-        return torch.ops.aten.convolution(inputVec,
-                                          weight,
-                                          bias=None,
-                                          stride=[1],
-                                          padding=[0],
-                                          dilation=[1],
-                                          transposed=False,
-                                          output_padding=[0],
-                                          groups=1)
-
-
-@register_test_case(module_factory=lambda: Convolution1DModule())
-def Convolution1DModule_basic(module, tu: TestUtils):
-    module.forward(torch.randn(3, 3, 10), torch.randn(3, 3, 2))
-
 class Convolution2DModule(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -193,31 +167,6 @@ def forward(self, inputVec, weight):
 def Convolution2DModule_basic(module, tu: TestUtils):
     module.forward(torch.randn(3, 3, 10, 10), torch.randn(3, 3, 2, 2))

-class Convolution3DModule(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1, -1, -1], torch.float32, True),
-        ([-1, -1, -1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, inputVec, weight):
-        return torch.ops.aten.convolution(inputVec,
-                                          weight,
-                                          bias=None,
-                                          stride=[1, 1, 1],
-                                          padding=[0, 0, 0],
-                                          dilation=[1, 1, 1],
-                                          transposed=False,
-                                          output_padding=[0, 0, 0],
-                                          groups=1)
-
-
-@register_test_case(module_factory=lambda: Convolution3DModule())
-def Convolution3DModule_basic(module, tu: TestUtils):
-    module.forward(torch.randn(3, 3, 10, 10, 10), torch.randn(3, 3, 2, 2, 2))

 class Convolution2DStaticModule(torch.nn.Module):
     def __init__(self):
@@ -635,33 +584,6 @@ def ConvolutionModule2DTransposeStridedStatic_basic(module, tu: TestUtils):
     module.forward(torch.randn(5, 2, 5, 6), torch.randn(2, 5, 2, 2))


-class Conv_Transpose1dModule(torch.nn.Module):
-
-    def __init__(self):
-        super().__init__()
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1], torch.float32, True),
-        ([-1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, inputVec, weight):
-        return torch.ops.aten.conv_transpose1d(inputVec,
-                                               weight,
-                                               bias=None,
-                                               stride=[2],
-                                               padding=[1],
-                                               dilation=[1],
-                                               output_padding=[0],
-                                               groups=1)
-
-
-@register_test_case(module_factory=lambda: Conv_Transpose1dModule())
-def Conv_Transpose1dModule_basic(module, tu: TestUtils):
-    module.forward(torch.randn(5, 2, 5), torch.randn(2, 5, 2))
-
-
 class Conv_Transpose2dModule(torch.nn.Module):

     def __init__(self):
@@ -688,31 +610,6 @@ def forward(self, inputVec, weight):
 def Conv_Transpose2dModule_basic(module, tu: TestUtils):
     module.forward(torch.randn(5, 2, 5, 6), torch.randn(2, 5, 2, 2))

-class Conv_Transpose3dModule(torch.nn.Module):
-
-    def __init__(self):
-        super().__init__()
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1, -1, -1], torch.float32, True),
-        ([-1, -1, -1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, inputVec, weight):
-        return torch.ops.aten.conv_transpose3d(inputVec,
-                                               weight,
-                                               bias=None,
-                                               stride=[2, 2, 2],
-                                               padding=[1, 1, 1],
-                                               dilation=[1, 1, 1],
-                                               output_padding=[0, 0, 0],
-                                               groups=1)
-
-
-@register_test_case(module_factory=lambda: Conv_Transpose3dModule())
-def Conv_Transpose3dModule_basic(module, tu: TestUtils):
-    module.forward(torch.randn(5, 2, 5, 6, 4), torch.randn(2, 5, 2, 2, 2))

 class UpSampleNearest2d(torch.nn.Module):

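The deleted transposed-convolution tests pick shapes where the output size is easy to derive: L_out = (L_in - 1)*stride - 2*padding + dilation*(kernel - 1) + output_padding + 1. For the deleted 1-D test that gives (5 - 1)*2 - 2*1 + 1*(2 - 1) + 0 + 1 = 8, which a quick eager check confirms (a sketch using the standard torch.nn.functional API, not the test harness):

import torch
import torch.nn.functional as F

# Shapes from the deleted Conv_Transpose1dModule_basic test.
inputVec = torch.randn(5, 2, 5)  # (batch, in_channels, length)
weight = torch.randn(2, 5, 2)    # (in_channels, out_channels, kernel)

out = F.conv_transpose1d(inputVec, weight, bias=None, stride=2, padding=1,
                         output_padding=0, groups=1, dilation=1)
print(out.shape)  # torch.Size([5, 5, 8])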
python/torch_mlir_e2e_test/test_suite/pooling.py (0 additions, 47 deletions)
@@ -171,29 +171,6 @@ def MaxPool2dCeilModeTrueModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 1, 20, 20, low=0.5, high=1.0))


-class MaxPool2dWith3dInputModule(torch.nn.Module):
-
-    def __init__(self):
-        super().__init__()
-        self.mp2d = torch.nn.MaxPool2d(kernel_size=[6, 8],
-                                       stride=[2, 2],
-                                       padding=[3, 4],
-                                       dilation=2)
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, x):
-        return self.mp2d(x)
-
-
-@register_test_case(module_factory=lambda: MaxPool2dWith3dInputModule())
-def MaxPool2dWith3dInputModule_basic(module, tu: TestUtils):
-    module.forward(tu.rand(1, 20, 20, low=-1))
-
-
 # ==============================================================================
@@ -435,30 +412,6 @@ def MaxPool2dWithIndicesCeilModeTrueModule_basic(module, tu: TestUtils):
     module.forward(tu.rand(1, 1, 8, 8, low=0.5, high=1.0))


-class MaxPool2dWithIndicesWith3dInputModule(torch.nn.Module):
-
-    def __init__(self):
-        super().__init__()
-
-    @export
-    @annotate_args([
-        None,
-        ([-1, -1, -1], torch.float32, True),
-    ])
-    def forward(self, x):
-        return torch.ops.aten.max_pool2d_with_indices(x,
-                                                      kernel_size=[2, 2],
-                                                      stride=[1, 1],
-                                                      padding=[0, 0],
-                                                      dilation=[1, 1])
-
-
-@register_test_case(
-    module_factory=lambda: MaxPool2dWithIndicesWith3dInputModule())
-def MaxPool2dWithIndicesWith3dInputModule_basic(module, tu: TestUtils):
-    module.forward(tu.rand(1, 8, 8, low=0.5, high=1.0))
-
-
 # ==============================================================================
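Both deleted pooling tests targeted max pooling on unbatched 3-D input, i.e. a (C, H, W) tensor rather than (N, C, H, W), which eager PyTorch accepts. A quick eager check with the deleted tests' shapes (a sketch; the shape comments are hand-derived):

import torch

# MaxPool2d on an unbatched (C, H, W) input, as MaxPool2dWith3dInputModule did.
mp2d = torch.nn.MaxPool2d(kernel_size=[6, 8], stride=[2, 2],
                          padding=[3, 4], dilation=2)
x = torch.rand(1, 20, 20)  # (C, H, W); no batch dimension
print(mp2d(x).shape)  # expected torch.Size([1, 8, 7])

# The with-indices form accepts the same unbatched input.
vals, idxs = torch.ops.aten.max_pool2d_with_indices(
    torch.rand(1, 8, 8), kernel_size=[2, 2], stride=[1, 1],
    padding=[0, 0], dilation=[1, 1])
print(vals.shape, idxs.shape)  # expected torch.Size([1, 7, 7]) for both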
python/torch_mlir_e2e_test/test_suite/table_batch_embedding.py (0 additions, 60 deletions)

This file was deleted.
