diff --git a/cmd/podman/kube/play.go b/cmd/podman/kube/play.go
index 946eb29602..01451bad4f 100644
--- a/cmd/podman/kube/play.go
+++ b/cmd/podman/kube/play.go
@@ -73,6 +73,7 @@ var (
   podman play kube --creds user:password --seccomp-profile-root /custom/path apache.yml
   podman play kube https://example.com/nginx.yml`,
 	}
+	logDriverFlagName = "log-driver"
 )
 
 func init() {
@@ -116,7 +117,6 @@ func playFlags(cmd *cobra.Command) {
 	flags.IPSliceVar(&playOptions.StaticIPs, staticIPFlagName, nil, "Static IP addresses to assign to the pods")
 	_ = cmd.RegisterFlagCompletionFunc(staticIPFlagName, completion.AutocompleteNone)
 
-	logDriverFlagName := "log-driver"
 	flags.StringVar(&playOptions.LogDriver, logDriverFlagName, common.LogDriver(), "Logging driver for the container")
 	_ = cmd.RegisterFlagCompletionFunc(logDriverFlagName, common.AutocompleteLogDriver)
 
@@ -247,6 +247,15 @@ func play(cmd *cobra.Command, args []string) error {
 		return errors.New("--force may be specified only with --down")
 	}
 
+	// When running under Systemd use passthrough as the default log-driver.
+	// When doing so, the journal socket is passed to the containers as-is which has two advantages:
+	// 1. journald can see who the actual sender of the log event is,
+	//    rather than thinking everything comes from the conmon process
+	// 2. conmon will not have to copy all the log data
+	if !cmd.Flags().Changed(logDriverFlagName) && playOptions.ServiceContainer {
+		playOptions.LogDriver = define.PassthroughLogging
+	}
+
 	reader, err := readerFromArg(args[0])
 	if err != nil {
 		return err
diff --git a/test/system/250-systemd.bats b/test/system/250-systemd.bats
index c560b20667..19917509ea 100644
--- a/test/system/250-systemd.bats
+++ b/test/system/250-systemd.bats
@@ -414,6 +414,12 @@ EOF
     run_podman 125 container rm $service_container
     is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
 
+    # Verify that the log-driver for the Pod's containers is passthrough
+    for name in "a" "b"; do
+        run_podman container inspect test_pod-${name} --format "{{.HostConfig.LogConfig.Type}}"
+        is $output "passthrough"
+    done
+
     # Add a simple `auto-update --dry-run` test here to avoid too much redundancy
     # with 255-auto-update.bats
     run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
diff --git a/test/system/260-sdnotify.bats b/test/system/260-sdnotify.bats
index 037dffd884..bf76dc9660 100644
--- a/test/system/260-sdnotify.bats
+++ b/test/system/260-sdnotify.bats
@@ -225,7 +225,7 @@ EOF
     wait_for_file $_SOCAT_LOG
 
     # Will run until all containers have stopped.
-    run_podman play kube --service-container=true $yaml_source
+    run_podman play kube --service-container=true --log-driver journald $yaml_source
     run_podman container wait $service_container test_pod-test
 
     # Make sure the containers have the correct policy.
@@ -302,7 +302,7 @@ EOF
     # Run `play kube` in the background as it will wait for all containers to
     # send the READY=1 message.
     timeout --foreground -v --kill=10 60 \
-        $PODMAN play kube --service-container=true $yaml_source &>/dev/null &
+        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &
 
     # Wait for both containers to be running
     for i in $(seq 1 20); do
diff --git a/test/system/700-play.bats b/test/system/700-play.bats
index 8a879083e2..7eb44c6323 100644
--- a/test/system/700-play.bats
+++ b/test/system/700-play.bats
@@ -126,7 +126,7 @@ EOF
     # Run `play kube` in the background as it will wait for the service
     # container to exit.
     timeout --foreground -v --kill=10 60 \
-        $PODMAN play kube --service-container=true $yaml_source &>/dev/null &
+        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &
 
     # Wait for the container to be running
     container_a=test_pod-test
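
A minimal sketch (not part of the patch) of how the new default can be observed; the pod and container names mirror the system tests above, and pod.yaml stands in for any YAML accepted by `podman play kube`:

    # When a systemd unit runs `podman play kube --service-container=true pod.yaml`
    # without an explicit --log-driver, each container should now report passthrough:
    podman container inspect test_pod-a --format "{{.HostConfig.LogConfig.Type}}"
    # -> passthrough

    # Passing --log-driver explicitly (as the sdnotify and play tests now do) overrides the default:
    podman play kube --service-container=true --log-driver journald pod.yaml
    podman container inspect test_pod-a --format "{{.HostConfig.LogConfig.Type}}"
    # -> journald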