From 68a58c9a12ce642494f7f166f103a5abc323078d Mon Sep 17 00:00:00 2001 From: Valentin Rothberg Date: Wed, 8 Feb 2023 09:12:43 +0100 Subject: [PATCH] kube play: do not teardown unconditionally on error Commit 2f29639bd3aa9 added a UX improvement to cleanup/teardown when running the specified YAML has failed. However, the teardown happens unconditionally such that rerunning the same YAML file will tear down the previously created workload instead of just failing with a name-conflict error (e.g., "pod already exists"). The regression popped up testing the Ansible system role with Podman v4.4.0. For now, do not tear down at all on error to quickly fix this regression for the upcoming Podman v4.4.1 release. The UX improvement is still desired but must be conditional and only happen on newly created resources, which probably requires moving it down to the backend. Signed-off-by: Valentin Rothberg --- cmd/podman/kube/play.go | 19 ++++++++++++------- test/system/700-play.bats | 6 ++++++ 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/cmd/podman/kube/play.go b/cmd/podman/kube/play.go index 4a62f02759..37ab8cc886 100644 --- a/cmd/podman/kube/play.go +++ b/cmd/podman/kube/play.go @@ -279,14 +279,19 @@ func play(cmd *cobra.Command, args []string) error { } if err := kubeplay(reader); err != nil { + // FIXME: The cleanup logic below must be fixed to only remove + // resources that were created before a failure. Otherwise, + // rerunning the same YAML file will cause an error and remove + // the previously created workload. 
+ // // teardown any containers, pods, and volumes that might have been created before we hit the error - teardownReader, trErr := readerFromArg(args[0]) - if trErr != nil { - return trErr - } - if tErr := teardown(teardownReader, entities.PlayKubeDownOptions{Force: true}, true); tErr != nil && !errorhandling.Contains(tErr, define.ErrNoSuchPod) { - return fmt.Errorf("error tearing down workloads %q after kube play error %q", tErr, err) - } + // teardownReader, trErr := readerFromArg(args[0]) + // if trErr != nil { + // return trErr + // } + // if tErr := teardown(teardownReader, entities.PlayKubeDownOptions{Force: true}, true); tErr != nil && !errorhandling.Contains(tErr, define.ErrNoSuchPod) { + // return fmt.Errorf("error tearing down workloads %q after kube play error %q", tErr, err) + // } return err } diff --git a/test/system/700-play.bats b/test/system/700-play.bats index 248bdec27a..9c8867a29d 100644 --- a/test/system/700-play.bats +++ b/test/system/700-play.bats @@ -99,6 +99,12 @@ RELABEL="system_u:object_r:container_file_t:s0" is "$output" "${RELABEL} $TESTDIR" "selinux relabel should have happened" fi + # Now rerun twice to make sure nothing gets removed + run_podman 125 play kube $PODMAN_TMPDIR/test.yaml + is "$output" ".* is in use: pod already exists" + run_podman 125 play kube $PODMAN_TMPDIR/test.yaml + is "$output" ".* is in use: pod already exists" + run_podman stop -a -t 0 run_podman pod rm -t 0 -f test_pod }