diff --git a/libpod/container_internal.go b/libpod/container_internal.go index c168144426..2cba923c26 100644 --- a/libpod/container_internal.go +++ b/libpod/container_internal.go @@ -225,6 +225,15 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error { } func (c *Container) shouldRestart() bool { + if c.config.HealthCheckOnFailureAction == define.HealthCheckOnFailureActionRestart { + isUnhealthy, err := c.isUnhealthy() + if err != nil { + logrus.Errorf("Checking if container is unhealthy: %v", err) + } else if isUnhealthy { + return true + } + } + // If we did not get a restart policy match, return false // Do the same if we're not a policy that restarts. if !c.state.RestartPolicyMatch || @@ -264,6 +273,12 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err return false, err } + if c.config.HealthCheckConfig != nil { + if err := c.removeTransientFiles(ctx); err != nil { + return false, err + } + } + // Is the container running again? 
// If so, we don't have to do anything if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) { @@ -1421,6 +1436,7 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retEr if err := c.stop(timeout); err != nil { return err } + if c.config.HealthCheckConfig != nil { if err := c.removeTransientFiles(context.Background()); err != nil { logrus.Error(err.Error()) diff --git a/libpod/healthcheck.go b/libpod/healthcheck.go index 1a81360f41..cbf7616f70 100644 --- a/libpod/healthcheck.go +++ b/libpod/healthcheck.go @@ -2,7 +2,6 @@ package libpod import ( "bufio" - "context" "errors" "fmt" "os" @@ -158,8 +157,12 @@ func (c *Container) processHealthCheckStatus(status string) error { } case define.HealthCheckOnFailureActionRestart: - if err := c.RestartWithTimeout(context.Background(), c.config.StopTimeout); err != nil { - return fmt.Errorf("restarting container after health-check turned unhealthy: %w", err) + // We let the cleanup process handle the restart. Otherwise + // the container would be restarted in the context of a + // transient systemd unit which may cause undesired side + // effects. + if err := c.Stop(); err != nil { + return fmt.Errorf("restarting/stopping container after health-check turned unhealthy: %w", err) + } case define.HealthCheckOnFailureActionStop: @@ -212,6 +215,18 @@ func (c *Container) updateHealthStatus(status string) error { return os.WriteFile(c.healthCheckLogPath(), newResults, 0700) } +// isUnhealthy returns if the current health check status is unhealthy. 
+func (c *Container) isUnhealthy() (bool, error) { + if !c.HasHealthCheck() { + return false, nil + } + healthCheck, err := c.getHealthCheckLog() + if err != nil { + return false, err + } + return healthCheck.Status == define.HealthCheckUnhealthy, nil +} + // UpdateHealthCheckLog parses the health check results and writes the log func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPeriod bool) (string, error) { c.lock.Lock() diff --git a/test/system/220-healthcheck.bats b/test/system/220-healthcheck.bats index 82b89d4482..02f4c00724 100644 --- a/test/system/220-healthcheck.bats +++ b/test/system/220-healthcheck.bats @@ -137,21 +137,24 @@ Log[-1].Output | \"Uh-oh on stdout!\\\nUh-oh on stderr!\" run_podman 1 healthcheck run $ctr is "$output" "unhealthy" "output from 'podman healthcheck run'" - run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}" - if [[ $policy == "restart" ]];then - # Container has been restarted and health check works again - is "$output" "running $policy" "container has been restarted" - run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}" - is "$output" "0" "Failing streak of restarted container should be 0 again" - run_podman healthcheck run $ctr + if [[ $policy == "restart" ]];then + # Make sure the container transitions back to running + run_podman wait --condition=running $ctr + run_podman inspect $ctr --format "{{.RestartCount}}" + assert "${#lines[@]}" != 0 "Container has been restarted at least once" + run_podman container inspect $ctr --format "{{.State.Healthcheck.FailingStreak}}" + is "$output" "0" "Failing streak of restarted container should be 0 again" + run_podman healthcheck run $ctr elif [[ $policy == "none" ]];then + run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}" # Container is still running and health check still broken is "$output" "running $policy" "container continued running" run_podman 1 
healthcheck run $ctr - is "$output" "unhealthy" "output from 'podman healthcheck run'" - else - # kill and stop yield the container into a non-running state - is "$output" ".* $policy" "container was stopped/killed" + is "$output" "unhealthy" "output from 'podman healthcheck run' (policy: $policy)" + else + run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}" + # kill and stop yield the container into a non-running state + is "$output" ".* $policy" "container was stopped/killed (policy: $policy)" assert "$output" != "running $policy" # also make sure that it's not stuck in the stopping state assert "$output" != "stopping $policy" @@ -162,4 +165,39 @@ Log[-1].Output | \"Uh-oh on stdout!\\\nUh-oh on stderr!\" done } +@test "podman healthcheck --health-on-failure with interval" { + ctr="healthcheck_c" + + for policy in stop kill restart ;do + t0=$(date --iso-8601=seconds) + run_podman run -d --name $ctr \ + --health-cmd /bin/false \ + --health-retries=1 \ + --health-on-failure=$policy \ + --health-interval=1s \ + $IMAGE top + + if [[ $policy == "restart" ]];then + # Sleeping for 2 seconds makes the test much faster than using + # podman-wait which would compete with the container getting + # restarted. + sleep 2 + # Make sure the container transitions back to running + run_podman wait --condition=running $ctr + run_podman inspect $ctr --format "{{.RestartCount}}" + assert "${#lines[@]}" != 0 "Container has been restarted at least once" + else + # kill and stop yield the container into a non-running state + run_podman wait $ctr + run_podman inspect $ctr --format "{{.State.Status}} {{.Config.HealthcheckOnFailureAction}}" + is "$output" ".* $policy" "container was stopped/killed (policy: $policy)" + assert "$output" != "running $policy" + # also make sure that it's not stuck in the stopping state + assert "$output" != "stopping $policy" + fi + + run_podman rm -f -t0 $ctr + done +} + # vim: filetype=sh