Skip to content

Commit

Permalink
libpod: drop hack to set conmon cgroup pids.max=1
Browse files Browse the repository at this point in the history
Avoid forcing the pids.max = 1 limit that was used to prevent cleanup
processes from spawning. That approach is racy, since a cleanup process
could already have been triggered by the container exiting; and it does
not work with rootless setups that cannot use cgroups (i.e. cgroupfs on
cgroup v1).

Closes: containers#13382

[NO NEW TESTS NEEDED] it doesn't add any new functionality

Signed-off-by: Giuseppe Scrivano <[email protected]>
  • Loading branch information
giuseppe committed Mar 2, 2022
1 parent 7877b02 commit c4472e1
Showing 1 changed file with 25 additions and 29 deletions.
54 changes: 25 additions & 29 deletions libpod/runtime_pod_linux.go
Original file line number Diff line number Diff line change
@@ -1,21 +1,23 @@
//go:build linux
// +build linux

package libpod

import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"strings"
"time"

"github.com/containers/common/pkg/cgroups"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/specgen"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
Expand Down Expand Up @@ -195,10 +197,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// Go through and lock all containers so we can operate on them all at
// once.
// First loop also checks that we are ready to go ahead and remove.
containersLocked := true
for _, ctr := range ctrs {
ctrLock := ctr.lock
ctrLock.Lock()
defer ctrLock.Unlock()
defer func() {
if containersLocked {
ctrLock.Unlock()
}
}()

// If we're force-removing, no need to check status.
if force {
Expand All @@ -216,32 +223,6 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}

// We're going to be removing containers.
// If we are Cgroupfs cgroup driver, to avoid races, we need to hit
// the pod and conmon Cgroups with a PID limit to prevent them from
// spawning any further processes (particularly cleanup processes) which
// would prevent removing the Cgroups.
if p.runtime.config.Engine.CgroupManager == config.CgroupfsCgroupsManager {
// Get the conmon Cgroup
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
logrus.Errorf("Retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}

// New resource limits
resLimits := new(spec.LinuxResources)
resLimits.Pids = new(spec.LinuxPids)
resLimits.Pids.Limit = 1 // Inhibit forks with very low pids limit

// Don't try if we failed to retrieve the cgroup
if err == nil {
if err := conmonCgroup.Update(resLimits); err != nil {
logrus.Warnf("Error updating pod %s conmon cgroup PID limit: %v", p.ID(), err)
}
}
}

var removalErr error

ctrNamedVolumes := make(map[string]*ContainerNamedVolume)
Expand Down Expand Up @@ -300,6 +281,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}

// let's unlock the containers so the cleanup processes can terminate their execution
for _, ctr := range ctrs {
ctr.lock.Unlock()
}
containersLocked = false

// Remove pod cgroup, if present
if p.state.CgroupPath != "" {
logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)
Expand Down Expand Up @@ -328,7 +315,16 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}
if err == nil {
if err := conmonCgroup.Delete(); err != nil {
for attempts := 0; attempts < 50; attempts++ {
err = conmonCgroup.Delete()
if err == nil || os.IsNotExist(err) {
// success
err = nil
break
}
time.Sleep(time.Millisecond * 100)
}
if err != nil {
if removalErr == nil {
removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
} else {
Expand Down

0 comments on commit c4472e1

Please sign in to comment.