Commit
update for compatibility with tensorflow tests
franckma31 committed Dec 2, 2024
1 parent ccdc261 commit 1fd81f4
Showing 8 changed files with 105 additions and 39 deletions.
6 changes: 5 additions & 1 deletion tests/test_layers.py
@@ -149,7 +149,7 @@ def train_k_lip_model(
input_shape: tuple,
k_lip_model: float,
k_lip_data: float,
**kwargs
**kwargs,
):
"""
Create a generator, create a model, train it and return the results.
@@ -630,6 +630,8 @@ def test_spectralconv2d_pad(
layer_params["padding"] = pad
layer_params["padding_mode"] = pad_mode
layer_params["kernel_size"] = kernel_size
if not uft.is_supported_padding(pad_mode, SpectralConv2d):
pytest.skip(f"SpectralConv2d: Padding {pad_mode} not supported")
test_params = dict(
layer_type=SpectralConv2d,
layer_params=layer_params,
@@ -1372,6 +1374,8 @@ def test_Conv2d_vanilla_export(pad, pad_mode, kernel_size, layer_params, layer_t
layer_type = layer_type
input_shape = (1, 5, 5)

if not uft.is_supported_padding(pad_mode, layer_type):
pytest.skip(f"{layer_type}: Padding {pad_mode} not supported")
model = uft.generate_k_lip_model(layer_type, layer_params, input_shape, 1.0)

# lay = SpectralConvTranspose2d(**kwargs)
1 change: 0 additions & 1 deletion tests/test_losses.py
@@ -228,7 +228,6 @@ def test_loss_generic_value(
y_true, y_pred = uft.to_tensor(y_true_np), uft.to_tensor(y_pred_np)

loss_val = uft.compute_loss(loss, y_pred, y_true).numpy()
print("loss_val", loss_val, expected_loss)
np.testing.assert_allclose(
loss_val,
np.float32(expected_loss),
1 change: 0 additions & 1 deletion tests/test_metrics.py
@@ -180,7 +180,6 @@ def test_provable_vs_adjusted(loss, loss_params, nb_class):

l1 = pr(y, x).numpy()
l2 = ar(y, x).numpy()
print(l1, l2)
diff = np.min(np.abs(l1 - l2))
assert (
diff > 1e-4
16 changes: 16 additions & 0 deletions tests/test_normalization.py
@@ -61,6 +61,10 @@ def check_serialization(layer_type, layer_params, input_shape=(10,)):
np.testing.assert_allclose(uft.to_numpy(y1), uft.to_numpy(y2))


@pytest.mark.skipif(
hasattr(LayerCentering, "unavailable_class"),
reason="LayerCentering not available",
)
@pytest.mark.parametrize(
"size, input_shape, bias",
[
@@ -92,6 +96,10 @@ def test_LayerCentering(size, input_shape, bias):
) # eval mode use running_mean


@pytest.mark.skipif(
hasattr(BatchCentering, "unavailable_class"),
reason="BatchCentering not available",
)
@pytest.mark.parametrize(
"size, input_shape, bias",
[
@@ -150,6 +158,8 @@ def test_BatchCentering(size, input_shape, bias):
)
def test_Normalization_serialization(norm_type, size, input_shape, bias):
# Check serialization
if hasattr(norm_type, "unavailable_class"):
pytest.skip(f"{norm_type} not available")
check_serialization(
norm_type, layer_params={"size": size, "bias": bias}, input_shape=input_shape
)
@@ -189,6 +199,8 @@ def linear_generator(batch_size, input_shape: tuple):
],
)
def test_Normalization_bias(norm_type, size, input_shape, bias):
if hasattr(norm_type, "unavailable_class"):
pytest.skip(f"{norm_type} not available")
m = uft.generate_k_lip_model(
norm_type,
layer_params={"size": size, "bias": bias},
@@ -221,6 +233,10 @@ def test_Normalization_bias(norm_type, size, input_shape, bias):
assert np.linalg.norm(bb) != 0.0


@pytest.mark.skipif(
hasattr(BatchCentering, "unavailable_class"),
reason="BatchCentering not available",
)
@pytest.mark.parametrize(
"size, input_shape, bias",
[
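These skipif markers rely on the test utilities exporting a stub class whenever a layer has no implementation in the current backend, so that hasattr(LayerCentering, "unavailable_class") is True. A minimal sketch of that convention; the stub shape is an assumption, only the unavailable_class attribute name comes from this diff:

# Hypothetical sketch of the stub a backend could export for a missing layer.
class LayerCentering:
    unavailable_class = True  # marker checked by hasattr(...) in the skipif decorators

    def __init__(self, *args, **kwargs):
        raise NotImplementedError("LayerCentering is not available in this backend")

# hasattr(LayerCentering, "unavailable_class") is then True, so the parametrized
# tests above are skipped instead of failing when the layer cannot be built.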
1 change: 0 additions & 1 deletion tests/test_normalizers.py
@@ -64,7 +64,6 @@
)
def test_kernel_svd(kernel_shape):
"""Compare max singular value using power iteration and np.linalg.svd"""
print(kernel_shape)
kernel = rng.normal(size=kernel_shape).astype("float32")
sigmas_svd = np.linalg.svd(
np.reshape(kernel, (np.prod(kernel.shape[:-1]), kernel.shape[-1])),
6 changes: 3 additions & 3 deletions tests/test_unconstrained_layers.py
@@ -71,7 +71,7 @@ def compare(x, x_ref, index_x=[], index_x_ref=[]):
def test_padding(padding_tested, input_shape, batch_size, kernel_size, filters):
"""Test different padding types: assert values in original and padded tensors"""
input_shape = uft.to_framework_channel(input_shape)
if not uft.is_supported_padding(padding_tested):
if not uft.is_supported_padding(padding_tested, PadConv2d):
pytest.skip(f"Padding {padding_tested} not supported")
kernel_size_list = kernel_size
if isinstance(kernel_size, (int, float)):
@@ -176,7 +176,7 @@ def test_predict(padding_tested, input_shape, batch_size, kernel_size, filters):
in_ch = input_shape[0]
input_shape = uft.to_framework_channel(input_shape)

if not uft.is_supported_padding(padding_tested):
if not uft.is_supported_padding(padding_tested, PadConv2d):
pytest.skip(f"Padding {padding_tested} not supported")
layer_params = {
"out_channels": 2,
@@ -250,7 +250,7 @@ def test_vanilla(padding_tested, input_shape, batch_size, kernel_size, filters):
in_ch = input_shape[0]
input_shape = uft.to_framework_channel(input_shape)

if not uft.is_supported_padding(padding_tested):
if not uft.is_supported_padding(padding_tested, PadConv2d):
pytest.skip(f"Padding {padding_tested} not supported")
layer_params = {
"out_channels": 2,
51 changes: 33 additions & 18 deletions tests/test_updownsampling.py
@@ -38,7 +38,7 @@ def check_downsample(x, y, kernel_size):
for dy in range(kernel_size):
xx = x[:, :, dx::kernel_size, dy::kernel_size]
yy = y[:, index :: (kernel_size * kernel_size), :, :]
np.testing.assert_array_equal(xx, yy)
np.testing.assert_almost_equal(xx, yy, decimal=6)
index += 1


@@ -47,29 +47,37 @@ def check_downsample(x, y, kernel_size):
reason="InvertibleDownSampling not available",
)
def test_invertible_downsample():

x = np.arange(32).reshape(1, 2, 4, 4)
x_np = np.arange(32).reshape(1, 2, 4, 4)
x = uft.to_NCHW_inv(x_np)
x = uft.to_tensor(x)
dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 2})
y = dw_layer(x)
assert y.shape == (1, 8, 2, 2)
check_downsample(x, y, 2)
y_np = uft.to_numpy(y)
y_np = uft.to_NCHW(y_np)
assert y_np.shape == (1, 8, 2, 2)
check_downsample(x_np, y_np, 2)

# 2D input
x = np.random.rand(10, 1, 128, 128) # torch.rand(10, 1, 128, 128)
x_np = np.random.rand(10, 1, 128, 128) # torch.rand(10, 1, 128, 128)
x = uft.to_NCHW_inv(x_np)
x = uft.to_tensor(x)

dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 4})
y = dw_layer(x)
assert y.shape == (10, 16, 32, 32)
check_downsample(x, y, 4)
y_np = uft.to_numpy(y)
y_np = uft.to_NCHW(y_np)
assert y_np.shape == (10, 16, 32, 32)
check_downsample(x_np, y_np, 4)

x = np.random.rand(10, 4, 64, 64)
x_np = np.random.rand(10, 4, 64, 64)
x = uft.to_NCHW_inv(x_np)
x = uft.to_tensor(x)
dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 2})
y = dw_layer(x)
assert y.shape == (10, 16, 32, 32)
check_downsample(x, y, 2)
y_np = uft.to_numpy(y)
y_np = uft.to_NCHW(y_np)
assert y_np.shape == (10, 16, 32, 32)
check_downsample(x_np, y_np, 2)


@pytest.mark.skipif(
Expand All @@ -79,17 +87,22 @@ def test_invertible_downsample():
def test_invertible_upsample():

# 2D input
x = np.random.rand(10, 16, 32, 32)
x_np = np.random.rand(10, 16, 32, 32)
x = uft.to_NCHW_inv(x_np)
x = uft.to_tensor(x)
dw_layer = uft.get_instance_framework(InvertibleUpSampling, {"kernel_size": 4})
y = dw_layer(x)
assert y.shape == (10, 1, 128, 128)
check_downsample(y, x, 4)
y_np = uft.to_numpy(y)
y_np = uft.to_NCHW(y_np)
assert y_np.shape == (10, 1, 128, 128)
check_downsample(y_np, x_np, 4)

dw_layer = uft.get_instance_framework(InvertibleUpSampling, {"kernel_size": 2})
y = dw_layer(x)
assert y.shape == (10, 4, 64, 64)
check_downsample(y, x, 2)
y_np = uft.to_numpy(y)
y_np = uft.to_NCHW(y_np)
assert y_np.shape == (10, 4, 64, 64)
check_downsample(y_np, x_np, 2)


@pytest.mark.skipif(
Expand All @@ -98,7 +111,8 @@ def test_invertible_upsample():
reason="InvertibleUpSampling not available",
)
def test_invertible_upsample_downsample():
x = np.random.rand(10, 16, 32, 32)
x_np = np.random.rand(10, 16, 32, 32)
x = uft.to_NCHW_inv(x_np)
x = uft.to_tensor(x)
up_layer = uft.get_instance_framework(InvertibleUpSampling, {"kernel_size": 4})
y = up_layer(x)
@@ -108,7 +122,8 @@ def test_invertible_upsample_downsample():
assert z.shape == x.shape
np.testing.assert_array_equal(x, z)

x = np.random.rand(10, 1, 128, 128) # torch.rand(10, 1, 128, 128)
x_np = np.random.rand(10, 1, 128, 128) # torch.rand(10, 1, 128, 128)
x = uft.to_NCHW_inv(x_np)
x = uft.to_tensor(x)

dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 4})
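The reworked up/downsampling tests keep the reference data as NCHW NumPy arrays and convert only at the framework boundary, so the same checks work for channels-first and channels-last backends. A condensed sketch of that round trip, assuming the uft helpers convert as their names suggest (to_NCHW_inv: NCHW to native layout, to_NCHW: back to NCHW):

# Condensed version of the pattern used in the tests above (helper semantics
# assumed from their names, not checked against utils_framework).
x_np = np.random.rand(10, 4, 64, 64)          # reference input, NCHW
x = uft.to_tensor(uft.to_NCHW_inv(x_np))      # tensor in the backend's native layout
y = dw_layer(x)                               # layer under test
y_np = uft.to_NCHW(uft.to_numpy(y))           # back to NCHW for the assertions
check_downsample(x_np, y_np, 2)               # checks stay framework-agnostic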
62 changes: 48 additions & 14 deletions tests/utils_framework.py
@@ -208,11 +208,18 @@ def get_instance_withcheck(
instance_type, inst_params, dict_keys_replace={}, list_keys_notimplemented=[]
):
for k in list_keys_notimplemented:
if k in inst_params:
warnings.warn(
UserWarning("Warning key is not implemented", k, " in pytorch")
)
return None
if isinstance(k, tuple):
kk = k[0]
kv = k[1]
else:
kk = k
kv = None
if kk in inst_params:
if (kv is None) or inst_params[kk] in kv:
warnings.warn(
UserWarning("Warning key is not implemented", kk, " in tensorflow")
)
return None
layp = replace_key_params(inst_params, dict_keys_replace)
return instance_type(**layp)
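
With this change, an entry of list_keys_notimplemented can be either a bare key or a (key, unsupported_values) tuple, so a parameter can be rejected only for specific values. A short illustration of both forms; the parameter names below are hypothetical examples, not taken from the library:

# Hypothetical entries:
#   "dilation"                                  -> reject whenever the key is passed
#   ("padding_mode", ["circular", "symmetric"]) -> reject only these values
not_implemented = ["dilation", ("padding_mode", ["circular", "symmetric"])]

layer = get_instance_withcheck(
    SpectralConv2d,
    {"out_channels": 2, "padding_mode": "circular"},
    list_keys_notimplemented=not_implemented,
)  # warns and returns None, because "circular" is listed as unsupported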

@@ -619,15 +626,42 @@ def vanillaModel(model):
return model


def is_supported_padding(padding):
return padding.lower() in [
"same",
"valid",
"reflect",
"circular",
"symmetric",
"replicate",
] # "constant",
def is_supported_padding(padding, layer_type):
layertype2padding = {
SpectralConv2d: [
"same",
"zeros",
"valid",
"reflect",
"circular",
"symmetric",
"replicate",
],
FrobeniusConv2d: [
"same",
"zeros",
"valid",
"reflect",
"circular",
"symmetric",
"replicate",
],
PadConv2d: [
"same",
"zeros",
"valid",
"reflect",
"circular",
"symmetric",
"replicate",
],
}
if layer_type in layertype2padding:
return padding.lower() in layertype2padding[layer_type]
else:
assert False
warnings.warn(f"layer {layer_type} type not supported for padding")
return False


def pad_input(x, padding, kernel_size):
