Merge pull request #7283 from mheon/pod_infra_has_exit_cmd
Ensure pod infra containers have an exit command
openshift-merge-robot authored Aug 17, 2020
2 parents 47108e2 + c4b2078 commit 8caed30
Showing 8 changed files with 169 additions and 39 deletions.
6 changes: 5 additions & 1 deletion libpod/container_config.go
@@ -310,7 +310,11 @@ type ContainerMiscConfig struct {
// OCIRuntime used to create the container
OCIRuntime string `json:"runtime,omitempty"`
// ExitCommand is the container's exit command.
// This Command will be executed when the container exits
// This command will be executed by Conmon when the container exits.
// It is usually used to invoke post-run cleanup - for example, in
// Podman, it invokes `podman container cleanup`, which in turn calls
// Libpod's Cleanup() API to unmount the container and clean up its
// network.
ExitCommand []string `json:"exitCommand,omitempty"`
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
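To make the new ExitCommand semantics concrete, here is a minimal sketch of what the field holds and what Conmon ultimately runs. The binary path and container ID are assumptions, not taken from this diff; the real command Podman generates also carries storage flags (see pkg/specgen/generate/pod_create.go below).

```go
package main

import "fmt"

func main() {
	// Assumed default install path; illustrative only.
	exitCommand := []string{"/usr/bin/podman", "container", "cleanup"}

	// Conmon appends the container's ID before executing, so the process
	// spawned when the container exits looks like:
	//   /usr/bin/podman container cleanup <container-id>
	ctrID := "deadbeefcafe" // hypothetical container ID
	fmt.Println(append(exitCommand, ctrID))
}
```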
20 changes: 20 additions & 0 deletions libpod/options.go
@@ -2130,3 +2130,23 @@ func WithPodHostNetwork() PodCreateOption {
return nil
}
}

// WithPodInfraExitCommand sets an exit command for the pod's infra container.
// Semantics are identical to WithExitCommand() above - the ID of the container
// will be appended to the end of the provided command (note that this will
// specifically be the ID of the infra container, *not the pod's ID*).
func WithPodInfraExitCommand(exitCmd []string) PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
return define.ErrPodFinalized
}

if !pod.config.InfraContainer.HasInfraContainer {
return errors.Wrapf(define.ErrInvalidArg, "cannot configure pod infra container exit command as no infra container is being created")
}

pod.config.InfraContainer.ExitCommand = exitCmd

return nil
}
}
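A hedged usage sketch for the new option; the import path, runtime value, and podman binary path are assumptions. Because pod create options are applied in order and WithPodInfraExitCommand checks HasInfraContainer, WithInfraContainer() must come first:

```go
package example

import (
	"context"

	"github.com/containers/podman/v2/libpod"
)

// makeInfraPod sketches creating a pod whose infra container runs the given
// exit command when it exits. rt is assumed to be an initialized runtime.
func makeInfraPod(rt *libpod.Runtime) (*libpod.Pod, error) {
	exitCmd := []string{"/usr/bin/podman", "container", "cleanup"}
	return rt.NewPod(context.Background(),
		libpod.WithInfraContainer(),             // sets HasInfraContainer
		libpod.WithPodInfraExitCommand(exitCmd), // otherwise ErrInvalidArg
	)
}
```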
11 changes: 10 additions & 1 deletion libpod/pod.go
@@ -81,7 +81,15 @@ type podState struct {
InfraContainerID string
}

// InfraContainerConfig is the configuration for the pod's infra container
// InfraContainerConfig is the configuration for the pod's infra container.
// These options are equivalent to the identically-named container
// configuration options in container_config.go, and each sets the
// corresponding field in the infra container's configuration.
// The two exceptions are HasInfraContainer, which determines whether an infra
// container is created at all (if false, no other options in this struct are
// used), and HostNetwork, which affects the generated OCI spec and as such is
// not represented directly in container_config.go.
type InfraContainerConfig struct {
ConmonPidFile string `json:"conmonPidFile"`
HasInfraContainer bool `json:"makeInfraContainer"`
@@ -96,6 +104,7 @@ type InfraContainerConfig struct {
UseImageHosts bool `json:"useImageHosts,omitempty"`
HostAdd []string `json:"hostsAdd,omitempty"`
Networks []string `json:"networks,omitempty"`
ExitCommand []string `json:"exitCommand,omitempty"`
}

// ID retrieves the pod's ID
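To illustrate the field mapping described above, a hypothetical InfraContainerConfig might look like the sketch below; all values are made up, and HasInfraContainer alone decides whether the rest is consulted.

```go
package example

import "github.com/containers/podman/v2/libpod"

// exampleInfraConfig uses only fields visible in this diff, plus HostNetwork.
var exampleInfraConfig = libpod.InfraContainerConfig{
	HasInfraContainer: true, // if false, every other field is ignored
	ConmonPidFile:     "/run/pods/example-conmon.pid",
	HostAdd:           []string{"gateway:10.0.0.1"},
	ExitCommand:       []string{"/usr/bin/podman", "container", "cleanup"},
	HostNetwork:       false, // realized in the OCI spec, not container_config.go
}
```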
134 changes: 103 additions & 31 deletions libpod/pod_api.go
@@ -11,20 +11,20 @@ import (
"github.com/sirupsen/logrus"
)

// Start starts all containers within a pod
// It combines the effects of Init() and Start() on a container
// Start starts all containers within a pod.
// It combines the effects of Init() and Start() on a container.
// If a container has already been initialized it will be started,
// otherwise it will be initialized then started.
// Containers that are already running or have been paused are ignored
// All containers are started independently, in order dictated by their
// dependencies.
// An error and a map[string]error are returned
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were started
// any containers were started.
// If map is not nil, an error was encountered when starting one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// If both error and the map are nil, all containers were started successfully
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were started successfully.
func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -72,20 +72,20 @@ func (p *Pod) Stop(ctx context.Context, cleanup bool) (map[string]error, error)
}

// StopWithTimeout stops all containers within a pod that are not already stopped
// Each container will use its own stop timeout
// Each container will use its own stop timeout.
// Only running containers will be stopped. Paused, stopped, or created
// containers will be ignored.
// If cleanup is true, mounts and network namespaces will be cleaned up after
// the container is stopped.
// All containers are stopped independently. An error stopping one container
// will not prevent other containers being stopped.
// An error and a map[string]error are returned
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were stopped
// any containers were stopped.
// If map is not nil, an error was encountered when stopping one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// If both error and the map are nil, all containers were stopped without error
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were stopped without error.
func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -138,10 +138,82 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
ctr.lock.Unlock()
}

p.newPodEvent(events.Stop)

if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
}
defer p.newPodEvent(events.Stop)
return nil, nil
}

// Cleanup cleans up all containers within a pod that have stopped.
// All containers are cleaned up independently. An error with one container will
// not prevent other containers being cleaned up.
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were cleaned up.
// If map is not nil, an error was encountered when working on one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were cleaned up without error.
func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()

if !p.valid {
return nil, define.ErrPodRemoved
}

allCtrs, err := p.runtime.state.PodContainers(p)
if err != nil {
return nil, err
}

ctrErrors := make(map[string]error)

// Clean up all containers
for _, ctr := range allCtrs {
ctr.lock.Lock()

if err := ctr.syncContainer(); err != nil {
ctr.lock.Unlock()
ctrErrors[ctr.ID()] = err
continue
}

// Ignore containers that are running/paused
if !ctr.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
ctr.lock.Unlock()
continue
}

// Check for running exec sessions, ignore containers with them.
sessions, err := ctr.getActiveExecSessions()
if err != nil {
ctr.lock.Unlock()
ctrErrors[ctr.ID()] = err
continue
}
if len(sessions) > 0 {
ctr.lock.Unlock()
continue
}

// TODO: Should we handle restart policy here?

ctr.newContainerEvent(events.Cleanup)

if err := ctr.cleanup(ctx); err != nil {
ctrErrors[ctr.ID()] = err
}

ctr.lock.Unlock()
}

if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
}

return nil, nil
}

@@ -150,12 +222,12 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
// containers will be ignored.
// All containers are paused independently. An error pausing one container
// will not prevent other containers being paused.
// An error and a map[string]error are returned
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were paused
// any containers were paused.
// If map is not nil, an error was encountered when pausing one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were paused without error
func (p *Pod) Pause() (map[string]error, error) {
p.lock.Lock()
@@ -219,13 +291,13 @@ func (p *Pod) Pause() (map[string]error, error) {
// containers will be ignored.
// All containers are unpaused independently. An error unpausing one container
// will not prevent other containers being unpaused.
// An error and a map[string]error are returned
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were unpaused
// any containers were unpaused.
// If map is not nil, an error was encountered when unpausing one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// If both error and the map are nil, all containers were unpaused without error
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were unpaused without error.
func (p *Pod) Unpause() (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -280,13 +352,13 @@ func (p *Pod) Unpause() (map[string]error, error) {
// All containers are started independently, in order dictated by their
// dependencies. An error restarting one container
// will not prevent other containers being restarted.
// An error and a map[string]error are returned
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were restarted
// any containers were restarted.
// If map is not nil, an error was encountered when restarting one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// If both error and the map are nil, all containers were restarted without error
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were restarted without error.
func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -328,17 +400,17 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
return nil, nil
}

// Kill sends a signal to all running containers within a pod
// Kill sends a signal to all running containers within a pod.
// Signals will only be sent to running containers. Containers that are not
// running will be ignored. All signals are sent independently, and sending will
// continue even if some containers encounter errors.
// An error and a map[string]error are returned
// An error and a map[string]error are returned.
// If the error is not nil and the map is nil, an error was encountered before
// any containers were signalled
// any containers were signalled.
// If map is not nil, an error was encountered when signalling one or more
// containers. The container ID is mapped to the error encountered. The error is
// set to ErrCtrExists
// If both error and the map are nil, all containers were signalled successfully
// set to ErrPodPartialFail.
// If both error and the map are nil, all containers were signalled successfully.
func (p *Pod) Kill(signal uint) (map[string]error, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -393,8 +465,8 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
return nil, nil
}

// Status gets the status of all containers in the pod
// Returns a map of Container ID to Container Status
// Status gets the status of all containers in the pod.
// Returns a map of Container ID to Container Status.
func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
p.lock.Lock()
defer p.lock.Unlock()
@@ -430,7 +502,7 @@ func containerStatusFromContainers(allCtrs []*Container) (map[string]define.Cont
return status, nil
}

// Inspect returns a PodInspect struct to describe the pod
// Inspect returns a PodInspect struct to describe the pod.
func (p *Pod) Inspect() (*define.InspectPodData, error) {
p.lock.Lock()
defer p.lock.Unlock()
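The map[string]error contract spelled out in the comments above is shared by Start, Stop, StopWithTimeout, the new Cleanup, Pause, Unpause, Restart, and Kill. A sketch of the caller side, under assumed imports and names:

```go
package example

import (
	"context"

	"github.com/containers/podman/v2/libpod"
	"github.com/containers/podman/v2/libpod/define"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// stopPod distinguishes total failure (nil map, non-nil error) from partial
// failure (non-nil map, error wrapping ErrPodPartialFail).
func stopPod(ctx context.Context, pod *libpod.Pod) error {
	ctrErrs, err := pod.StopWithTimeout(ctx, true, 10)
	if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
		return err // nothing was stopped
	}
	for id, ctrErr := range ctrErrs {
		logrus.Errorf("Error stopping container %s: %v", id, ctrErr)
	}
	return nil
}
```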
6 changes: 6 additions & 0 deletions libpod/runtime_pod_infra_linux.go
@@ -83,6 +83,9 @@ func (r *Runtime) makeInfraContainer(ctx context.Context, p *Pod, imgName, rawIm
return nil, errors.Wrapf(err, "error removing network namespace from pod %s infra container", p.ID())
}

// For each option in InfraContainerConfig: if set, pass it to the
// infra container we're creating via the appropriate With... option.
if p.config.InfraContainer.StaticIP != nil {
options = append(options, WithStaticIP(p.config.InfraContainer.StaticIP))
}
@@ -107,6 +110,9 @@
if len(p.config.InfraContainer.HostAdd) > 0 {
options = append(options, WithHosts(p.config.InfraContainer.HostAdd))
}
if len(p.config.InfraContainer.ExitCommand) > 0 {
options = append(options, WithExitCommand(p.config.InfraContainer.ExitCommand))
}
}

g.SetRootReadonly(true)
9 changes: 8 additions & 1 deletion pkg/api/handlers/libpod/pods.go
@@ -123,10 +123,17 @@ func PodStop(w http.ResponseWriter, r *http.Request) {
} else {
responses, stopError = pod.Stop(r.Context(), false)
}
if stopError != nil {
if stopError != nil && errors.Cause(stopError) != define.ErrPodPartialFail {
utils.Error(w, "Something went wrong", http.StatusInternalServerError, err)
return
}
// Try to clean up the pod - but only warn on failure; it's nonfatal.
if cleanupCtrs, cleanupErr := pod.Cleanup(r.Context()); cleanupErr != nil {
logrus.Errorf("Error cleaning up pod %s: %v", pod.ID(), cleanupErr)
for id, err := range cleanupCtrs {
logrus.Errorf("Error cleaning up pod %s container %s: %v", pod.ID(), id, err)
}
}
var errs []error //nolint
for _, err := range responses {
errs = append(errs, err)
6 changes: 3 additions & 3 deletions pkg/bindings/test/pods_test.go
@@ -244,7 +244,7 @@ var _ = Describe("Podman pods", func() {
Expect(response.State).To(Equal(define.PodStateExited))
for _, i := range response.Containers {
Expect(define.StringToContainerStatus(i.State)).
To(Equal(define.ContainerStateStopped))
To(Equal(define.ContainerStateExited))
}

// Stop an already stopped pod
@@ -309,7 +309,7 @@ var _ = Describe("Podman pods", func() {
Expect(response.State).To(Equal(define.PodStateExited))
for _, i := range response.Containers {
Expect(define.StringToContainerStatus(i.State)).
To(Equal(define.ContainerStateStopped))
To(Equal(define.ContainerStateExited))
}
_, err = pods.Stop(bt.conn, newpod2, nil)
Expect(err).To(BeNil())
@@ -318,7 +318,7 @@
Expect(response.State).To(Equal(define.PodStateExited))
for _, i := range response.Containers {
Expect(define.StringToContainerStatus(i.State)).
To(Equal(define.ContainerStateStopped))
To(Equal(define.ContainerStateExited))
}
_, err = pods.Prune(bt.conn)
Expect(err).To(BeNil())
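For reference, a sketch of the stop-then-inspect flow these tests exercise, via the bindings package; the socket path and pod name are assumptions:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containers/podman/v2/pkg/bindings"
	"github.com/containers/podman/v2/pkg/bindings/pods"
)

func stopViaBindings() error {
	conn, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		return err
	}
	// nil timeout: each container falls back to its own stop timeout.
	if _, err := pods.Stop(conn, "mypod", nil); err != nil {
		return err
	}
	inspect, err := pods.Inspect(conn, "mypod")
	if err != nil {
		return err
	}
	fmt.Println(inspect.State) // expect define.PodStateExited
	return nil
}
```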
16 changes: 14 additions & 2 deletions pkg/specgen/generate/pod_create.go
@@ -13,14 +13,14 @@ func MakePod(p *specgen.PodSpecGenerator, rt *libpod.Runtime) (*libpod.Pod, erro
if err := p.Validate(); err != nil {
return nil, err
}
options, err := createPodOptions(p)
options, err := createPodOptions(p, rt)
if err != nil {
return nil, err
}
return rt.NewPod(context.Background(), options...)
}

func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, error) {
func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime) ([]libpod.PodCreateOption, error) {
var (
options []libpod.PodCreateOption
)
@@ -31,6 +31,18 @@ func createPodOptions(p *specgen.PodSpecGenerator, rt *libpod.Runtime) ([]libpod.PodCreateOption, er
return nil, err
}
options = append(options, nsOptions...)

// Make our exit command
storageConfig := rt.StorageConfig()
runtimeConfig, err := rt.GetConfig()
if err != nil {
return nil, err
}
exitCommand, err := CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, false)
if err != nil {
return nil, errors.Wrapf(err, "error creating infra container exit command")
}
options = append(options, libpod.WithPodInfraExitCommand(exitCommand))
}
if len(p.CgroupParent) > 0 {
options = append(options, libpod.WithPodCgroupParent(p.CgroupParent))
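For a sense of what this wires up, here is a hedged reconstruction of the kind of slice CreateExitCommandArgs produces for a root user with default storage paths; the exact flags depend on the storage and runtime configuration, and the infra container's ID is appended later by Conmon.

```go
package example

// exampleExitCommand is illustrative only, not the literal output.
var exampleExitCommand = []string{
	"/usr/bin/podman",
	"--root", "/var/lib/containers/storage",
	"--runroot", "/run/containers/storage",
	"container", "cleanup",
}
```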
