
Merge pull request containers#15547 from vrothberg/RUN-1606
Support auto updates for Kubernetes workloads
openshift-merge-robot authored Sep 6, 2022
2 parents 2f555c0 + 274d34a commit db5ec4d
Showing 9 changed files with 320 additions and 98 deletions.
12 changes: 12 additions & 0 deletions docs/source/markdown/podman-auto-update.1.md.in
@@ -29,6 +29,18 @@ This data is then being used in the auto-update sequence to instruct systemd (vi
 Note that **podman auto-update** relies on systemd. The systemd units are expected to be generated with **[podman-generate-systemd --new](podman-generate-systemd.1.md#--new)**, or similar units that create new containers in order to run the updated images.
 Systemd units that start and stop a container cannot run a new image.
 
+### Auto Updates and Kubernetes YAML
+
+Podman supports auto updates for Kubernetes workloads. As mentioned above, `podman auto-update` requires the containers to run inside systemd units. Podman ships with a systemd template that can be instantiated with a Kubernetes YAML file; see podman-generate-systemd(1).
+
+To enable auto updates for containers running in a Kubernetes workload, set the following Podman-specific annotations in the YAML:
+* `io.containers.autoupdate: "registry|local"` to apply the auto-update policy to all containers
+* `io.containers.autoupdate/$container: "registry|local"` to apply the auto-update policy to `$container` only
+* `io.containers.sdnotify: "conmon|container"` to apply the sdnotify policy to all containers
+* `io.containers.sdnotify/$container: "conmon|container"` to apply the sdnotify policy to `$container` only
+
+By default, the auto-update policy is set to "disabled" and the sdnotify policy to "conmon".
+
 ### Systemd Unit and Timer
 
 Podman ships with a `podman-auto-update.service` systemd unit. This unit is triggered daily at midnight by the `podman-auto-update.timer` systemd timer. The timer can be altered for custom time-based updates if desired. The unit can further be invoked by other systemd units (e.g., via the dependency tree) or manually via **systemctl start podman-auto-update.service**.
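For illustration, here is a minimal sketch of a Kubernetes YAML using the annotations documented above; the pod name, container name, and image are hypothetical, while the annotation keys are the ones from the list:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod
  annotations:
    # Pod-wide default: auto-update all containers from their registry.
    io.containers.autoupdate: "registry"
    # Let the container named "app" send the READY notification itself.
    io.containers.sdnotify/app: "container"
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest
```

The systemd template mentioned above can then be instantiated with this YAML file; podman-generate-systemd(1) documents the template's exact name and usage.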
156 changes: 94 additions & 62 deletions pkg/autoupdate/autoupdate.go
@@ -153,18 +153,19 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
     }
 
     // Find auto-update tasks and assemble them by unit.
-    errors := auto.assembleTasks(ctx)
+    allErrors := auto.assembleTasks(ctx)
 
     // Nothing to do.
     if len(auto.unitToTasks) == 0 {
-        return nil, errors
+        return nil, allErrors
     }
 
     // Connect to DBUS.
     conn, err := systemd.ConnectToDBUS()
     if err != nil {
         logrus.Errorf(err.Error())
-        return nil, []error{err}
+        allErrors = append(allErrors, err)
+        return nil, allErrors
     }
     defer conn.Close()
     auto.conn = conn
@@ -174,72 +175,94 @@ func AutoUpdate(ctx context.Context, runtime *libpod.Runtime, options entities.A
     // Update all images/container according to their auto-update policy.
     var allReports []*entities.AutoUpdateReport
     for unit, tasks := range auto.unitToTasks {
-        // Sanity check: we'll support that in the future.
-        if len(tasks) != 1 {
-            errors = append(errors, fmt.Errorf("only 1 task per unit supported but unit %s has %d", unit, len(tasks)))
-            return nil, errors
-        }
-
+        unitErrors := auto.updateUnit(ctx, unit, tasks)
+        allErrors = append(allErrors, unitErrors...)
         for _, task := range tasks {
-            err := func() error {
-                // Transition from state to state. Will be
-                // split into multiple loops in the future to
-                // support more than one container/task per
-                // unit.
-                updateAvailable, err := task.updateAvailable(ctx)
-                if err != nil {
-                    task.status = statusFailed
-                    return fmt.Errorf("checking image updates for container %s: %w", task.container.ID(), err)
-                }
-
-                if !updateAvailable {
-                    task.status = statusNotUpdated
-                    return nil
-                }
-
-                if options.DryRun {
-                    task.status = statusPending
-                    return nil
-                }
-
-                if err := task.update(ctx); err != nil {
-                    task.status = statusFailed
-                    return fmt.Errorf("updating image for container %s: %w", task.container.ID(), err)
-                }
-
-                updateError := auto.restartSystemdUnit(ctx, unit)
-                if updateError == nil {
-                    task.status = statusUpdated
-                    return nil
-                }
-
-                if !options.Rollback {
-                    task.status = statusFailed
-                    return fmt.Errorf("restarting unit %s for container %s: %w", task.unit, task.container.ID(), err)
-                }
-
-                if err := task.rollbackImage(); err != nil {
-                    task.status = statusFailed
-                    return fmt.Errorf("rolling back image for container %s: %w", task.container.ID(), err)
-                }
-
-                if err := auto.restartSystemdUnit(ctx, unit); err != nil {
-                    task.status = statusFailed
-                    return fmt.Errorf("restarting unit %s for container %s during rollback: %w", task.unit, task.container.ID(), err)
-                }
-
-                task.status = statusRolledBack
-                return nil
-            }()
-
-            if err != nil {
-                errors = append(errors, err)
-            }
             allReports = append(allReports, task.report())
         }
     }
 
-    return allReports, errors
+    return allReports, allErrors
 }
 
+// updateUnit auto updates the tasks in the specified systemd unit.
+func (u *updater) updateUnit(ctx context.Context, unit string, tasks []*task) []error {
+    var errors []error
+    tasksUpdated := false
+
+    for _, task := range tasks {
+        err := func() error { // Use an anonymous function to avoid spaghetti continue's
+            updateAvailable, err := task.updateAvailable(ctx)
+            if err != nil {
+                task.status = statusFailed
+                return fmt.Errorf("checking image updates for container %s: %w", task.container.ID(), err)
+            }
+
+            if !updateAvailable {
+                task.status = statusNotUpdated
+                return nil
+            }
+
+            if u.options.DryRun {
+                task.status = statusPending
+                return nil
+            }
+
+            if err := task.update(ctx); err != nil {
+                task.status = statusFailed
+                return fmt.Errorf("updating image for container %s: %w", task.container.ID(), err)
+            }
+
+            tasksUpdated = true
+            return nil
+        }()
+
+        if err != nil {
+            errors = append(errors, err)
+        }
+    }
+
+    // If no task has been updated, we can jump directly to the next unit.
+    if !tasksUpdated {
+        return errors
+    }
+
+    updateError := u.restartSystemdUnit(ctx, unit)
+    for _, task := range tasks {
+        if updateError == nil {
+            task.status = statusUpdated
+        } else {
+            task.status = statusFailed
+        }
+    }
+
+    // Jump to the next unit on successful update or if rollbacks are disabled.
+    if updateError == nil || !u.options.Rollback {
+        return errors
+    }
+
+    // The update has failed and rollbacks are enabled.
+    for _, task := range tasks {
+        if err := task.rollbackImage(); err != nil {
+            err = fmt.Errorf("rolling back image for container %s in unit %s: %w", task.container.ID(), unit, err)
+            errors = append(errors, err)
+        }
+    }
+
+    if err := u.restartSystemdUnit(ctx, unit); err != nil {
+        for _, task := range tasks {
+            task.status = statusFailed
+        }
+        err = fmt.Errorf("restarting unit %s during rollback: %w", unit, err)
+        errors = append(errors, err)
+        return errors
+    }
+
+    for _, task := range tasks {
+        task.status = statusRolledBack
+    }
+
+    return errors
 }
 
 // report creates an auto-update report for the task.
@@ -258,7 +281,16 @@ func (t *task) report() *entities.AutoUpdateReport {
 func (t *task) updateAvailable(ctx context.Context) (bool, error) {
     switch t.policy {
     case PolicyRegistryImage:
-        return t.registryUpdateAvailable(ctx)
+        // Errors checking for updates only should not be fatal.
+        // Especially on Edge systems, connection may be limited or
+        // there may just be a temporary downtime of the registry.
+        // But make sure to leave some breadcrumbs in the debug logs
+        // such that potential issues _can_ be analyzed if needed.
+        available, err := t.registryUpdateAvailable(ctx)
+        if err != nil {
+            logrus.Debugf("Error checking updates for image %s: %v (ignoring error)", t.rawImageName, err)
+        }
+        return available, nil
     case PolicyLocalImage:
         return t.localUpdateAvailable()
     default:
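Two behavioral changes in this file are worth spelling out: with the registry policy, a failed update check is now logged at debug level and ignored instead of failing the whole run, and the restart/rollback cycle now happens once per systemd unit rather than once per task. A minimal standalone sketch of that per-unit flow (simplified, with assumed types and names; not the actual podman code):

```go
package main

import (
	"errors"
	"fmt"
)

// updateUnit sketches the per-unit flow of the refactored code: pull
// updates task by task, restart the systemd unit once, and roll back
// (also once per unit) only when the restart fails and rollbacks are
// enabled.
func updateUnit(unit string, tasksUpdated, rollback bool, restart func(string) error) string {
	if !tasksUpdated {
		return "not updated" // nothing was pulled; leave the unit alone
	}
	if restart(unit) == nil {
		return "updated"
	}
	if !rollback {
		return "failed"
	}
	// Roll back the images here (elided), then restart once more.
	if restart(unit) != nil {
		return "failed"
	}
	return "rolled back"
}

func main() {
	flaky := func(string) error { return errors.New("restart failed") }
	fmt.Println(updateUnit("container-app.service", true, true, flaky)) // failed (both restarts fail)
}
```

In the real code, `--dry-run` (options.DryRun above) marks tasks as pending without restarting anything, and the rollback branch is gated by options.Rollback.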
35 changes: 22 additions & 13 deletions pkg/domain/infra/abi/play.go
@@ -661,9 +661,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
 
     opts = append(opts, libpod.WithSdNotifyMode(sdNotifyMode))
 
+    var proxy *notifyproxy.NotifyProxy
     // Create a notify proxy for the container.
     if sdNotifyMode != "" && sdNotifyMode != define.SdNotifyModeIgnore {
-        proxy, err := notifyproxy.New("")
+        proxy, err = notifyproxy.New("")
         if err != nil {
             return nil, err
         }
@@ -675,6 +676,9 @@
     if err != nil {
         return nil, err
     }
+    if proxy != nil {
+        proxy.AddContainer(ctr)
+    }
     containers = append(containers, ctr)
 }
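Declaring `proxy` outside the `if` block is what makes the later `proxy.AddContainer(ctr)` call possible: when the sd-notify mode requires a proxy, every container created for the pod is now registered with it, instead of the proxy being scoped to, and dropped at the end of, the creation branch.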

@@ -774,21 +778,26 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string,
     }
 
     // Handle kube annotations
-    for k, v := range annotations {
-        switch k {
-        // Auto update annotation without container name will apply to
-        // all containers within the pod
-        case autoupdate.Label, autoupdate.AuthfileLabel:
-            labels[k] = v
-        // Auto update annotation with container name will apply only
-        // to the specified container
-        case fmt.Sprintf("%s/%s", autoupdate.Label, container.Name),
-            fmt.Sprintf("%s/%s", autoupdate.AuthfileLabel, container.Name):
-            prefixAndCtr := strings.Split(k, "/")
-            labels[prefixAndCtr[0]] = v
+    setLabel := func(label string) {
+        var result string
+        ctrSpecific := fmt.Sprintf("%s/%s", label, container.Name)
+        for k, v := range annotations {
+            switch k {
+            case label:
+                result = v
+            case ctrSpecific:
+                labels[label] = v
+                return
+            }
+        }
+        if result != "" {
+            labels[label] = result
         }
     }
 
+    setLabel(autoupdate.Label)
+    setLabel(autoupdate.AuthfileLabel)
+
     return pulledImage, labels, nil
 }

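The `setLabel` closure also changes the precedence semantics: a container-scoped annotation (`label/$container`) now deterministically wins over the pod-wide key, whereas the old `switch` wrote whichever matching key the map iteration happened to visit last. A standalone sketch of the rule (hypothetical `resolve` helper, not the podman code):

```go
package main

import "fmt"

// resolve returns the effective value for label: the container-scoped
// key ("label/name") takes precedence over the pod-wide key.
func resolve(label, container string, annotations map[string]string) (string, bool) {
	if v, ok := annotations[fmt.Sprintf("%s/%s", label, container)]; ok {
		return v, true
	}
	v, ok := annotations[label]
	return v, ok
}

func main() {
	annotations := map[string]string{
		"io.containers.autoupdate":      "registry",
		"io.containers.autoupdate/ctr1": "local",
	}
	fmt.Println(resolve("io.containers.autoupdate", "ctr1", annotations)) // local true
	fmt.Println(resolve("io.containers.autoupdate", "ctr2", annotations)) // registry true
}
```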
8 changes: 8 additions & 0 deletions pkg/specgen/generate/kube/kube.go
@@ -7,6 +7,7 @@ import (
     "fmt"
     "math"
     "net"
+    "os"
     "regexp"
     "runtime"
     "strconv"
@@ -26,6 +27,7 @@
     "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/api/resource"
     "github.com/containers/podman/v4/pkg/specgen"
     "github.com/containers/podman/v4/pkg/specgen/generate"
+    systemdDefine "github.com/containers/podman/v4/pkg/systemd/define"
     "github.com/containers/podman/v4/pkg/util"
     "github.com/docker/docker/pkg/system"
     "github.com/docker/go-units"
@@ -445,6 +447,12 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
         }
     }
 
+    // Make sure the container runs in a systemd unit which is
+    // stored as a label at container creation.
+    if unit := os.Getenv(systemdDefine.EnvVariable); unit != "" {
+        s.Labels[systemdDefine.EnvVariable] = unit
+    }
+
     return s, nil
 }

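This is the piece that ties `podman kube play` workloads into auto update's unit handling: Podman-generated systemd units expose the unit name to the payload through an environment variable (`systemdDefine.EnvVariable`; at the time this was `PODMAN_SYSTEMD_UNIT`, stated here from memory), and `ToSpecGen` now persists that name as a container label at creation time so `podman auto-update` can later map the container back to the unit it has to restart.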