With --rm option remove container if podman run fails #15060

Merged · 1 commit · Jul 28, 2022
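
In both the local (abi) and remote (tunnel) engines, the change folds the previously inline removal calls into a single removeContainer closure that does its own debug logging, and calls it on each early exit (failed CID-file creation, failed start or attach, and the normal --rm cleanup). The Go program below is a minimal, self-contained sketch of that cleanup-closure pattern; fakeContainer, removeFromStore, and errNoSuchCtr are stand-ins invented for the sketch, not libpod or podman APIs.

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// fakeContainer and removeFromStore stand in for libpod's Container type and
// the engine's RemoveContainer call; they exist only to keep the sketch runnable.
type fakeContainer struct{ id string }

var errNoSuchCtr = errors.New("no such container")

func removeFromStore(ctr *fakeContainer, force bool) error {
	fmt.Printf("removing container %s (force=%v)\n", ctr.id, force)
	return nil
}

// run mirrors the shape ContainerRun takes after this PR: one removeContainer
// closure owns the removal call and the debug logging, and every early-return
// error path calls it when rm (the --rm flag) is set.
func run(ctr *fakeContainer, rm bool, start func() error) error {
	removeContainer := func(force bool) error {
		if err := removeFromStore(ctr, force); err != nil {
			log.Printf("unable to remove container %s after failing to start and attach to it: %v", ctr.id, err)
			return err
		}
		return nil
	}

	if err := start(); err != nil {
		// Start failed (e.g. the published port is already bound). Honoring
		// --rm on this path is the behavior the PR adds.
		if rm {
			if rmErr := removeContainer(true); rmErr != nil && !errors.Is(rmErr, errNoSuchCtr) {
				log.Printf("Container %s failed to be removed", ctr.id)
			}
		}
		return err
	}
	return nil
}

func main() {
	ctr := &fakeContainer{id: "c_fail_with_rm"}
	_ = run(ctr, true, func() error { return errors.New("address already in use") })
}
```
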
24 changes: 18 additions & 6 deletions pkg/domain/infra/abi/containers.go
@@ -1061,6 +1061,15 @@ func (ic *ContainerEngine) Diff(ctx context.Context, namesOrIDs []string, opts e
}

func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.ContainerRunOptions) (*entities.ContainerRunReport, error) {
removeContainer := func(ctr *libpod.Container, force bool) error {
var timeout *uint
if err := ic.Libpod.RemoveContainer(ctx, ctr, force, true, timeout); err != nil {
logrus.Debugf("unable to remove container %s after failing to start and attach to it: %v", ctr.ID(), err)
return err
}
return nil
}

warn, err := generate.CompleteSpec(ctx, ic.Libpod, opts.Spec)
if err != nil {
return nil, err
@@ -1081,6 +1090,8 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta

if opts.CIDFile != "" {
if err := util.CreateCidFile(opts.CIDFile, ctr.ID()); err != nil {
// If you fail to create CIDFile then remove the container
_ = removeContainer(ctr, true)
return nil, err
}
}
@@ -1098,6 +1109,11 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
if err := ctr.Start(ctx, true); err != nil {
// This means the command did not exist
report.ExitCode = define.ExitCode(err)
if opts.Rm {
if rmErr := removeContainer(ctr, true); rmErr != nil && !errors.Is(rmErr, define.ErrNoSuchCtr) {
logrus.Errorf("Container %s failed to be removed", ctr.ID())
}
}
return &report, err
}

@@ -1114,10 +1130,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
return &report, nil
}
if opts.Rm {
var timeout *uint
if deleteError := ic.Libpod.RemoveContainer(ctx, ctr, true, false, timeout); deleteError != nil {
logrus.Debugf("unable to remove container %s after failing to start and attach to it", ctr.ID())
}
_ = removeContainer(ctr, true)
Review comment (Member): same here

Reply (Member Author): opts.Rm is checked. removeContainer is logging the debug message itself.

}
if errors.Is(err, define.ErrWillDeadlock) {
logrus.Debugf("Deadlock error on %q: %v", ctr.ID(), err)
@@ -1129,8 +1142,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
}
report.ExitCode = ic.GetContainerExitCode(ctx, ctr)
if opts.Rm && !ctr.ShouldRestart(ctx) {
var timeout *uint
if err := ic.Libpod.RemoveContainer(ctx, ctr, false, true, timeout); err != nil {
if err := removeContainer(ctr, false); err != nil {
if errors.Is(err, define.ErrNoSuchCtr) ||
errors.Is(err, define.ErrCtrRemoved) {
logrus.Infof("Container %s was already removed, skipping --rm", ctr.ID())
22 changes: 16 additions & 6 deletions pkg/domain/infra/tunnel/containers.go
@@ -787,8 +787,17 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
for _, w := range con.Warnings {
fmt.Fprintf(os.Stderr, "%s\n", w)
}
removeContainer := func(id string, force bool) error {
removeOptions := new(containers.RemoveOptions).WithVolumes(true).WithForce(force)
reports, err := containers.Remove(ic.ClientCtx, id, removeOptions)
logIfRmError(id, err, reports)
return err
}

if opts.CIDFile != "" {
if err := util.CreateCidFile(opts.CIDFile, con.ID); err != nil {
// If you fail to create CIDFile then remove the container
_ = removeContainer(con.ID, true)
return nil, err
}
}
@@ -800,6 +809,11 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
err := containers.Start(ic.ClientCtx, con.ID, new(containers.StartOptions).WithRecursive(true))
if err != nil {
report.ExitCode = define.ExitCode(err)
if opts.Rm {
if rmErr := removeContainer(con.ID, true); rmErr != nil && !errors.Is(rmErr, define.ErrNoSuchCtr) {
logrus.Errorf("Container %s failed to be removed", con.ID)
}
}
}
return &report, err
}
@@ -812,10 +826,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta

report.ExitCode = define.ExitCode(err)
if opts.Rm {
reports, rmErr := containers.Remove(ic.ClientCtx, con.ID, new(containers.RemoveOptions).WithForce(false).WithVolumes(true))
if rmErr != nil || reports[0].Err != nil {
logrus.Debugf("unable to remove container %s after failing to start and attach to it", con.ID)
}
_ = removeContainer(con.ID, false)
}
return &report, err
}
@@ -831,8 +842,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta
}

if !shouldRestart {
reports, err := containers.Remove(ic.ClientCtx, con.ID, new(containers.RemoveOptions).WithForce(false).WithVolumes(true))
logIfRmError(con.ID, err, reports)
_ = removeContainer(con.ID, false)
}
}()
}
16 changes: 16 additions & 0 deletions test/system/030-run.bats
@@ -871,4 +871,20 @@ EOF
run_podman container rm -fa
}

@test "podman run failed --rm " {
port=$(random_free_port)

    # Run three containers with the same port binding. The second and third
    # must fail to start; only the third uses --rm.
run_podman run -p $port:80 --rm -d --name c_ok $IMAGE top
run_podman 126 run -p $port:80 -d --name c_fail_no_rm $IMAGE top
run_podman 126 run -p $port:80 --rm -d --name c_fail_with_rm $IMAGE top
# Prior to #15060, the third container would still show up in ps -a
run_podman ps -a --sort names --format '{{.Image}}--{{.Names}}'
is "$output" "$IMAGE--c_fail_no_rm
$IMAGE--c_ok" \
"podman ps -a shows running & failed containers, but not failed-with-rm"

run_podman container rm -f -t 0 c_ok c_fail_no_rm
}

# vim: filetype=sh