container stop: release lock before calling the runtime
Podman defers stopping the container to the runtime, which can take some
time.  Keeping the lock while waiting for the runtime to complete the
stop procedure prevents other commands from acquiring the lock, as shown
in containers#8501.

To improve the user experience, release the lock before invoking the
runtime, and re-acquire the lock once the runtime has finished.  Also
introduce an intermediate "stopping" state to properly distinguish
stopping containers from "stopped" ones.

Fixes: containers#8501
Signed-off-by: Valentin Rothberg <[email protected]>
vrothberg committed Jan 14, 2021
1 parent d2503ae commit d54478d
Showing 5 changed files with 84 additions and 4 deletions.
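At its core, the commit applies a familiar concurrency pattern: mark the object as busy, drop its lock across a slow external call, then re-acquire the lock and bail out if another process changed the state in the meantime. The following is a minimal, self-contained Go sketch of that pattern; all names (Container, slowRuntimeStop, and so on) are illustrative stand-ins, not libpod's actual API.

package main

import (
    "fmt"
    "sync"
    "time"
)

type State int

const (
    Running State = iota
    Stopping
    Stopped
)

// Container is a stand-in for the locked object; not libpod's type.
type Container struct {
    mu    sync.Mutex
    state State
}

// slowRuntimeStop stands in for the OCI runtime call that may block
// for the full stop timeout.
func slowRuntimeStop() { time.Sleep(2 * time.Second) }

// Stop marks the container as "stopping", releases the lock across the
// slow call, and re-checks the state afterwards, mirroring the commit.
func (c *Container) Stop() {
    c.mu.Lock()
    c.state = Stopping // persisted to the DB in the real code
    c.mu.Unlock()      // lets `podman ps` etc. proceed meanwhile

    slowRuntimeStop() // runs without holding the lock

    c.mu.Lock()
    defer c.mu.Unlock()
    if c.state != Stopping {
        // Another process (e.g. `podman kill`) interfered; stop here.
        fmt.Println("state changed while stopping, aborting")
        return
    }
    c.state = Stopped
}

func main() {
    c := &Container{state: Running}
    c.Stop()
    fmt.Println("final state:", c.state) // prints the numeric value (2 == Stopped)
}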
10 changes: 8 additions & 2 deletions libpod/container_api.go
@@ -210,7 +210,13 @@ func (c *Container) Kill(signal uint) error {
    }

    // TODO: Is killing a paused container OK?
-   if c.state.State != define.ContainerStateRunning {
+   switch c.state.State {
+   case define.ContainerStateRunning, define.ContainerStateStopping:
+       // Note that killing containers in "stopping" state is okay.
+       // In that state, Podman is waiting for the runtime to
+       // stop the container and, if that is taking too long, a user
+       // may have decided to kill the container after all.
+   default:
        return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
    }

@@ -539,7 +545,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
    }

    // Check if state is good
-   if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
+   if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
        return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
    }

43 changes: 41 additions & 2 deletions libpod/container_internal.go
@@ -758,7 +758,7 @@ func (c *Container) isStopped() (bool, error) {
        return true, err
    }

-   return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused), nil
+   return !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping), nil
}

// save container state to the database
@@ -1284,10 +1284,49 @@ func (c *Container) stop(timeout uint) error {
        return err
    }

+   // Set the container state to "stopping" and unlock the container
+   // before handing it over to conmon to unblock other commands. #8501
+   // demonstrates nicely that a high stop timeout will block even simple
+   // commands such as `podman ps` from progressing if the container lock
+   // is held when busy-waiting for the container to be stopped.
+   c.state.State = define.ContainerStateStopping
+   if err := c.save(); err != nil {
+       return errors.Wrapf(err, "error saving container %s state before stopping", c.ID())
+   }
+   if !c.batched {
+       c.lock.Unlock()
+   }

    if err := c.ociRuntime.StopContainer(c, timeout, all); err != nil {
        return err
    }
+
+   if !c.batched {
+       c.lock.Lock()
+       if err := c.syncContainer(); err != nil {
+           switch errors.Cause(err) {
+           // If the container has already been removed (e.g., via
+           // the cleanup process), there's nothing left to do.
+           case define.ErrNoSuchCtr, define.ErrCtrRemoved:
+               return nil
+           default:
+               return err
+           }
+       }
+   }
+
+   // Since we're now subject to a race condition with other processes that
+   // may have altered the state (and other data), let's check if the
+   // state has changed. If so, we should return immediately and log a
+   // warning.
+   if c.state.State != define.ContainerStateStopping {
+       logrus.Warnf(
+           "Container %q state changed from %q to %q while waiting for it to be stopped: discontinuing stop procedure as another process interfered",
+           c.ID(), define.ContainerStateStopping, c.state.State,
+       )
+       return nil
+   }
+
    c.newContainerEvent(events.Stop)

    c.state.PID = 0
@@ -2094,7 +2133,7 @@ func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume
// Check for an exit file, and handle one if present
func (c *Container) checkExitFile() error {
    // If the container's not running, nothing to do.
-   if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
+   if !c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused, define.ContainerStateStopping) {
        return nil
    }

5 changes: 5 additions & 0 deletions libpod/define/containerstate.go
@@ -28,6 +28,9 @@ const (
    // ContainerStateRemoving indicates the container is in the process of
    // being removed.
    ContainerStateRemoving ContainerStatus = iota
+   // ContainerStateStopping indicates the container is in the process of
+   // being stopped.
+   ContainerStateStopping ContainerStatus = iota
)

// ContainerStatus returns a string representation for users
@@ -50,6 +53,8 @@ func (t ContainerStatus) String() string {
        return "exited"
    case ContainerStateRemoving:
        return "removing"
+   case ContainerStateStopping:
+       return "stopping"
    }
    return "bad state"
}
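Downstream code can treat the new constant like any other status value. Here is a quick usage sketch; the /v2 import path reflects the module path around the time of this commit and should be treated as an assumption.

package main

import (
    "fmt"

    "github.com/containers/podman/v2/libpod/define"
)

func main() {
    // The new constant now stringifies to "stopping" instead of
    // falling through to "bad state".
    st := define.ContainerStateStopping
    fmt.Println(st.String()) // prints "stopping"

    // Callers polling for shutdown can distinguish a container that
    // is still being torn down from one that has already stopped.
    if st == define.ContainerStateStopping {
        fmt.Println("runtime is still stopping the container")
    }
}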
2 changes: 2 additions & 0 deletions libpod/rootless_cni_linux.go
@@ -110,6 +110,8 @@ func DeallocRootlessCNI(ctx context.Context, c *Container) error {
        logrus.Warn(err)
    }
    logrus.Debugf("rootless CNI: removing infra container %q", infra.ID())
+   infra.lock.Lock()
+   defer infra.lock.Unlock()
    if err := c.runtime.removeContainer(ctx, infra, true, false, true); err != nil {
        return err
    }
28 changes: 28 additions & 0 deletions test/system/050-stop.bats
@@ -67,4 +67,32 @@ load helpers
    done
}

+# Regression test for #8501
+@test "podman stop - unlock while waiting for timeout" {
+    # Test that the container state transitions to "stopping" and that other
+    # commands can get the container's lock. To do that, run a container that
+    # ignores SIGTERM so that Podman will wait 20 seconds for the stop
+    # to finish. This gives us enough time to try some commands and inspect
+    # the container's status.
+
+    run_podman run --name stopme -d $IMAGE sh -c \
+        "trap 'echo Received SIGTERM, ignoring' SIGTERM; echo READY; while :; do sleep 1; done"
+
+    # Stop the container in the background
+    $PODMAN stop -t 20 stopme &
+
+    # Other commands can acquire the lock
+    run_podman ps -a
+
+    # The container state transitioned to "stopping"
+    run_podman inspect --format '{{.State.Status}}' stopme
+    is "$output" "stopping" "Status of container should be 'stopping'"
+
+    run_podman kill stopme
+
+    # Exit code should be 137 (128 + 9/SIGKILL) as the container was killed
+    run_podman inspect --format '{{.State.ExitCode}}' stopme
+    is "$output" "137" "Exit code of killed container"
+}
+
# vim: filetype=sh
