From 9558af2c4cd55744ffc19fb8d0b1f40abc3ba290 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Wed, 19 Apr 2023 11:58:33 +0200
Subject: [PATCH 01/11] libpod: stop containers with --restart=always

Commit 1ab833fb73 improved the situation but it is still not enough.
If you run short-lived containers with --restart=always, podman is
basically permanently restarting them. The only way to stop this is
podman stop. However, podman stop does not do anything when the
container is already in a non-running state. While this makes sense, we
should still mark the container as explicitly stopped by the user.

Together with the change in shouldRestart(), which now checks for
StoppedByUser, this makes sure the cleanup process is not going to
start the container back up again.

A simple reproducer is:
```
podman run --restart=always --name test -d alpine true
podman stop test
```
then check if the container is still running. The behavior is very
flaky; it took me about 20 podman stop tries before I finally hit the
window where it stayed stopped permanently. With this patch it worked
on the first try.

Fixes #18259

[NO NEW TESTS NEEDED] This is super flaky and hard to test correctly
in CI. My ginkgo v2 work seems to trigger this in play kube tests, so
that should catch at least some regressions. Also this may be
something that should be tested at podman test days by users (#17912).

Signed-off-by: Paul Holzinger
---
 libpod/container_api.go      |  8 -------
 libpod/container_internal.go | 42 ++++++++++++++++++++++++++++++------
 2 files changed, 35 insertions(+), 15 deletions(-)

diff --git a/libpod/container_api.go b/libpod/container_api.go
index 08c2557105..f928e51667 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -254,14 +254,6 @@ func (c *Container) StopWithTimeout(timeout uint) (finalErr error) {
 		}
 	}

-	if c.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
-		return define.ErrCtrStopped
-	}
-
-	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopping) {
-		return fmt.Errorf("can only stop created or running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
-	}
-
 	return c.stop(timeout)
 }

diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index f8b0e5fe3a..3cc3c735a2 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -238,6 +238,11 @@ func (c *Container) shouldRestart() bool {
 		}
 	}

+	// Explicitly stopped by user, do not restart again.
+	if c.state.StoppedByUser {
+		return false
+	}
+
 	// If we did not get a restart policy match, return false
 	// Do the same if we're not a policy that restarts.
 	if !c.state.RestartPolicyMatch ||
@@ -1307,15 +1312,38 @@ func (c *Container) stop(timeout uint) error {
 		}
 	}

-	// Set the container state to "stopping" and unlock the container
-	// before handing it over to conmon to unblock other commands. #8501
-	// demonstrates nicely that a high stop timeout will block even simple
-	// commands such as `podman ps` from progressing if the container lock
-	// is held when busy-waiting for the container to be stopped.
+	// OK, the following code looks a bit weird but we have to make sure we can stop
+	// containers with the restart policy always; to do this we have to set
+	// StoppedByUser even when there is nothing to stop right now. This is due to the
+	// cleanup process waiting on the container lock and then restarting the
+	// container afterwards. shouldRestart() then checks for StoppedByUser and does
+	// not restart it.
+	// https://github.com/containers/podman/issues/18259
+	var cannotStopErr error
+	if c.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
+		cannotStopErr = define.ErrCtrStopped
+	} else if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopping) {
+		cannotStopErr = fmt.Errorf("can only stop created or running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
+	}
+
 	c.state.StoppedByUser = true
-	c.state.State = define.ContainerStateStopping
+	if cannotStopErr == nil {
+		// Set the container state to "stopping" and unlock the container
+		// before handing it over to conmon to unblock other commands. #8501
+		// demonstrates nicely that a high stop timeout will block even simple
+		// commands such as `podman ps` from progressing if the container lock
+		// is held when busy-waiting for the container to be stopped.
+		c.state.State = define.ContainerStateStopping
+	}
 	if err := c.save(); err != nil {
-		return fmt.Errorf("saving container %s state before stopping: %w", c.ID(), err)
+		rErr := fmt.Errorf("saving container %s state before stopping: %w", c.ID(), err)
+		if cannotStopErr == nil {
+			return rErr
+		}
+		// we return below with cannotStopErr
+		logrus.Error(rErr)
+	}
+	if cannotStopErr != nil {
+		return cannotStopErr
 	}
 	if !c.batched {
 		c.lock.Unlock()
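To make the handshake above easier to follow, here is a minimal, self-contained sketch of the idea; the names are toy stand-ins, not the actual libpod types:

```go
package main

import "fmt"

// ctrState is a toy stand-in for libpod's container state.
type ctrState struct {
	Running       bool
	RestartAlways bool
	StoppedByUser bool
}

// stop records the user's intent even when there is nothing to kill,
// mirroring the patch: the flag is set before any state check bails out.
func stop(s *ctrState) {
	s.StoppedByUser = true
	s.Running = false
}

// shouldRestart is what the cleanup process consults after taking the lock.
func shouldRestart(s *ctrState) bool {
	if s.StoppedByUser {
		return false // an explicit stop wins over --restart=always
	}
	return s.RestartAlways && !s.Running
}

func main() {
	// A short-lived container that already exited while cleanup was pending.
	s := &ctrState{Running: false, RestartAlways: true}
	stop(s)
	fmt.Println("restart?", shouldRestart(s)) // false: the race window is closed
}
```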
From 5b21c38c842b62bed9dac36fd8bb526d4979eaa9 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Mon, 17 Apr 2023 18:16:50 +0200
Subject: [PATCH 02/11] podman-remote logs: handle server error correctly

If the server responds with an error, we must report it correctly back
to the user.

Signed-off-by: Paul Holzinger
---
 pkg/bindings/containers/logs.go | 5 +++++
 test/e2e/logs_test.go           | 7 +++++++
 2 files changed, 12 insertions(+)

diff --git a/pkg/bindings/containers/logs.go b/pkg/bindings/containers/logs.go
index 9ebfd90dad..9d3fdb8ebe 100644
--- a/pkg/bindings/containers/logs.go
+++ b/pkg/bindings/containers/logs.go
@@ -35,6 +35,11 @@ func Logs(ctx context.Context, nameOrID string, options *LogOptions, stdoutChan,
 	}
 	defer response.Body.Close()

+	// if not successful, handle and return the possible error message
+	if !(response.IsSuccess() || response.IsInformational()) {
+		return response.Process(nil)
+	}
+
 	buffer := make([]byte, 1024)
 	for {
 		fd, l, err := DemuxHeader(response.Body, buffer)
diff --git a/test/e2e/logs_test.go b/test/e2e/logs_test.go
index 5a50869e8f..46d1bd0d42 100644
--- a/test/e2e/logs_test.go
+++ b/test/e2e/logs_test.go
@@ -47,6 +47,13 @@ var _ = Describe("Podman logs", func() {

 	})

+	It("podman logs on nonexistent container", func() {
+		results := podmanTest.Podman([]string{"logs", "notexist"})
+		results.WaitWithDefaultTimeout()
+		Expect(results).To(Exit(125))
+		Expect(results.ErrorToString()).To(Equal(`Error: no container with name or ID "notexist" found: no such container`))
+	})
+
 	for _, log := range []string{"k8s-file", "journald", "json-file"} {
 		// This is important to move the 'log' var to the correct scope under Ginkgo flow.
 		log := log
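The shape of the bindings fix, reduced to plain net/http; the real code uses the bindings' response helpers and demuxes stdout/stderr frames, both omitted here, so treat this as a sketch of the control flow only:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// streamLogs inspects the status code before entering the streaming loop,
// instead of trying to demux an error body as if it were log frames.
func streamLogs(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Anything outside 1xx/2xx carries an error payload, not a log stream.
	if resp.StatusCode >= 300 {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("server error %d: %s", resp.StatusCode, body)
	}

	// The real bindings demux stdout/stderr frames here; a copy suffices
	// to show where the streaming loop begins.
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}

func main() {
	if err := streamLogs("http://localhost:8080/containers/notexist/logs?stdout=true"); err != nil {
		fmt.Fprintln(os.Stderr, "Error:", err)
		os.Exit(125)
	}
}
```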
From 66fb7c9bb52ceded75fe2f798dfcd52217ca675a Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Wed, 3 May 2023 14:44:16 +0200
Subject: [PATCH 03/11] remote: exec inspect update exec session status

The remote API will wait 300s by default before conmon calls the
cleanup. In the meantime, if you inspect an exec session that was
started with ExecStart() (i.e., not attached) and has already exited,
we do not know that. A caller inspecting the session would think it is
still running.

To prevent this, we sync the session based on the exec PID and update
the state accordingly.

For a reproducer, see the test in this commit or the issue.

Fixes #18424

Signed-off-by: Paul Holzinger
---
 libpod/container.go            | 11 +++++++++++
 pkg/bindings/test/exec_test.go | 11 ++++++++++-
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/libpod/container.go b/libpod/container.go
index b92255d7ed..083b4e9fc4 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -882,6 +882,17 @@ func (c *Container) execSessionNoCopy(id string) (*ExecSession, error) {
 		return nil, fmt.Errorf("no exec session with ID %s found in container %s: %w", id, c.ID(), define.ErrNoSuchExecSession)
 	}

+	// make sure to update the exec session if needed #18424
+	alive, err := c.ociRuntime.ExecUpdateStatus(c, id)
+	if err != nil {
+		return nil, err
+	}
+	if !alive {
+		if err := retrieveAndWriteExecExitCode(c, session.ID()); err != nil {
+			return nil, err
+		}
+	}
+
 	return session, nil
 }

diff --git a/pkg/bindings/test/exec_test.go b/pkg/bindings/test/exec_test.go
index 5a2e34eaa1..67c8e4b66b 100644
--- a/pkg/bindings/test/exec_test.go
+++ b/pkg/bindings/test/exec_test.go
@@ -30,7 +30,7 @@ var _ = Describe("Podman containers exec", func() {
 		bt.cleanup()
 	})

-	It("Podman exec create makes an exec session", func() {
+	It("Podman exec create+start makes an exec session", func() {
 		name := "testCtr"
 		cid, err := bt.RunTopContainer(&name, nil)
 		Expect(err).ToNot(HaveOccurred())
@@ -48,6 +48,15 @@ var _ = Describe("Podman containers exec", func() {
 		Expect(inspectOut.ProcessConfig.Entrypoint).To(Equal("echo"))
 		Expect(inspectOut.ProcessConfig.Arguments).To(HaveLen(1))
 		Expect(inspectOut.ProcessConfig.Arguments[0]).To(Equal("hello world"))
+
+		err = containers.ExecStart(bt.conn, sessionID, nil)
+		Expect(err).ToNot(HaveOccurred())
+
+		inspectOut, err = containers.ExecInspect(bt.conn, sessionID, nil)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(inspectOut.ContainerID).To(Equal(cid))
+		Expect(inspectOut.Running).To(BeFalse(), "session should not be running")
+		Expect(inspectOut.ExitCode).To(Equal(0), "exit code from echo")
 	})

 	It("Podman exec create with bad command fails", func() {
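What syncing "based on the exec PID" boils down to is a liveness probe on the session's process. A Linux-only sketch of such a probe, assuming the usual signal-0 semantics (this is an illustration of the concept, not the body of ExecUpdateStatus):

```go
package main

import (
	"errors"
	"fmt"
	"syscall"
)

// pidAlive answers the question an ExecUpdateStatus-style check must answer:
// is the exec session's process still running? Sending signal 0 performs the
// existence and permission checks without delivering any signal.
func pidAlive(pid int) bool {
	err := syscall.Kill(pid, 0)
	if err == nil {
		return true
	}
	// EPERM means the process exists but belongs to another user.
	return errors.Is(err, syscall.EPERM)
}

func main() {
	fmt.Println("self alive:", pidAlive(syscall.Getpid())) // true
	fmt.Println("bogus pid: ", pidAlive(99999999))         // almost certainly false
}
```

When the probe reports the process gone, the session state can be flipped to stopped and the exit code retrieved, which is exactly what the added libpod code does.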
From 02b9f4f5d030398844c11e8ccc603772df0299ab Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Thu, 27 Apr 2023 11:52:59 +0200
Subject: [PATCH 04/11] windows: podman save allow the use of stdout

By default podman save tries to write to /dev/stdout; this file does
not exist on Windows and cannot be opened. Instead we should just use
fd 1 in that case.

[NO NEW TESTS NEEDED]

Fixes #18147

Signed-off-by: Paul Holzinger
---
 pkg/domain/infra/tunnel/images.go | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/pkg/domain/infra/tunnel/images.go b/pkg/domain/infra/tunnel/images.go
index f249769f73..91b07b08f1 100644
--- a/pkg/domain/infra/tunnel/images.go
+++ b/pkg/domain/infra/tunnel/images.go
@@ -279,10 +279,19 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string,
 			defer func() { _ = os.Remove(f.Name()) }()
 		}
 	default:
-		// This code was added to allow for opening stdout replacing
-		// os.Create(opts.Output) which was attempting to open the file
-		// for read/write which fails on Darwin platforms
-		f, err = os.OpenFile(opts.Output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+		// This is ugly but I think the best we can do for now; on Windows
+		// there is no /dev/stdout, but the save command defaults to /dev/stdout.
+		// The proper thing to do would be to pass an io.Writer down from the cli frontend,
+		// but since the local save API does not support an io.Writer this is impossible.
+		// I reported it a while ago in https://github.com/containers/common/issues/1275
+		if opts.Output == "/dev/stdout" {
+			f = os.Stdout
+		} else {
+			// This code was added to allow for opening stdout replacing
+			// os.Create(opts.Output) which was attempting to open the file
+			// for read/write which fails on Darwin platforms
+			f, err = os.OpenFile(opts.Output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+		}
 	}
 	if err != nil {
 		return err
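The output-selection logic in isolation, as a runnable sketch; openOutput is a hypothetical helper for illustration, not the tunnel code itself:

```go
package main

import (
	"fmt"
	"os"
)

// openOutput mirrors the shape of the fix: treat the literal default
// "/dev/stdout" specially, so Windows, which has no such file, still works.
func openOutput(path string) (*os.File, error) {
	if path == "/dev/stdout" {
		return os.Stdout, nil // fd 1, valid on every platform
	}
	// O_WRONLY matters here: opening the target read/write fails on Darwin.
	return os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
}

func main() {
	f, err := openOutput("/dev/stdout")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Fprintln(f, "image tarball would be streamed here")
}
```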
From dc21698d60edf52df1af38c84c488a8ea3328542 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Wed, 26 Apr 2023 16:44:28 +0200
Subject: [PATCH 05/11] machine: qemu only remove connection after confirmation

The connection remove call must be done inside the returned function so
that it only runs after the user has confirmed the removal.

Fixes #18330

Signed-off-by: Paul Holzinger
---
 pkg/machine/qemu/machine.go | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index f39cbd8abc..d4a72ed240 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -940,13 +940,6 @@ func (v *MachineVM) Remove(_ string, opts machine.RemoveOptions) (string, func()
 	files = append(files, socketPath.Path)
 	files = append(files, v.archRemovalFiles()...)

-	if err := machine.RemoveConnection(v.Name); err != nil {
-		logrus.Error(err)
-	}
-	if err := machine.RemoveConnection(v.Name + "-root"); err != nil {
-		logrus.Error(err)
-	}
-
 	vmConfigDir, err := machine.GetConfDir(vmtype)
 	if err != nil {
 		return "", nil, err
@@ -977,6 +970,12 @@ func (v *MachineVM) Remove(_ string, opts machine.RemoveOptions) (string, func()
 				logrus.Error(err)
 			}
 		}
+		if err := machine.RemoveConnection(v.Name); err != nil {
+			logrus.Error(err)
+		}
+		if err := machine.RemoveConnection(v.Name + "-root"); err != nil {
+			logrus.Error(err)
+		}
 		return nil
 	}, nil
 }
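The pattern being fixed, reduced to its core: Remove returns a report plus a closure that does the deleting, and the caller prompts in between. Anything destructive placed outside the closure runs even when the user answers no. A sketch with made-up names:

```go
package main

import "fmt"

// remove returns a description of what will be deleted plus a closure that
// performs the deletion, so the caller can ask for confirmation in between.
func remove(name string) (string, func() error) {
	report := fmt.Sprintf("will remove machine %q and its connections", name)
	return report, func() error {
		// Connection removal belongs here, after the user confirmed;
		// before this patch it ran eagerly, outside the closure.
		fmt.Println("removing connections for", name)
		return nil
	}
}

func main() {
	report, doIt := remove("podman-machine-default")
	fmt.Println(report)
	confirmed := true // imagine a y/N prompt here
	if confirmed {
		_ = doIt()
	}
}
```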
From 7d0e4a6443dd95e61e4a9522e910c2a0b1f77e8f Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Thu, 4 May 2023 11:57:02 +0200
Subject: [PATCH 06/11] compat container create: match duplicate mounts correctly

The logic that checks for duplicated volumes here did not work
correctly because it used filepath.Clean() on the lookup key. However,
the writes to the volDestinations map did not, so the strings no longer
matched when, for example, the path included a trailing slash.

We can either call Clean() on all paths or on none. I decided to call
it on none, because that is what the surrounding code already does;
only this check cleaned the path.

Fixes #18454

Signed-off-by: Paul Holzinger
---
 pkg/api/handlers/compat/containers_create.go | 2 +-
 test/apiv2/20-containers.at                  | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/pkg/api/handlers/compat/containers_create.go b/pkg/api/handlers/compat/containers_create.go
index 811305341c..03acff66e4 100644
--- a/pkg/api/handlers/compat/containers_create.go
+++ b/pkg/api/handlers/compat/containers_create.go
@@ -483,7 +483,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
 	// This also handles volumes duplicated between cc.HostConfig.Mounts and
 	// cc.Volumes, as seen in compose v2.0.
 	for vol := range cc.Volumes {
-		if _, ok := volDestinations[filepath.Clean(vol)]; ok {
+		if _, ok := volDestinations[vol]; ok {
 			continue
 		}
 		cliOpts.Volume = append(cliOpts.Volume, vol)
diff --git a/test/apiv2/20-containers.at b/test/apiv2/20-containers.at
index 7cfe161de4..7840ea5242 100644
--- a/test/apiv2/20-containers.at
+++ b/test/apiv2/20-containers.at
@@ -357,6 +357,15 @@ t GET containers/$cid/json 200 \

 t DELETE containers/$cid?v=true 204

+# Test Volumes with bind mount, for some reason docker-py sets this #18454
+t POST containers/create Image=$IMAGE Volumes='{"/test/":{}}' HostConfig='{"Binds":["/tmp:/test/:ro"]}' 201 \
+  .Id~[0-9a-f]\\{64\\}
+cid=$(jq -r '.Id' <<<"$output")
+t GET containers/$cid/json 200 \
+  .Mounts[0].Destination="/test/"
+
+t DELETE containers/$cid?v=true 204
+
 # test port mapping
 podman run -d --rm --name bar -p 8080:9090 $IMAGE top
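The mismatch is easy to demonstrate: filepath.Clean() drops a trailing slash, so cleaning only the lookup key can never match an un-cleaned map key. A standalone demonstration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Destinations recorded from HostConfig.Binds, stored un-cleaned:
	volDestinations := map[string]bool{"/test/": true}

	vol := "/test/"
	// The old check cleaned one side only: "/test/" becomes "/test",
	// so the duplicate was never detected.
	fmt.Println("cleaned lookup hit:", volDestinations[filepath.Clean(vol)]) // false
	// Cleaning neither side (the fix) keeps the keys comparable.
	fmt.Println("raw lookup hit:    ", volDestinations[vol]) // true
}
```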
"root") identity := filepath.Join(sshDir, v.Name) uris := []url.URL{uri, uriRoot} diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go index db14af074a..47a52b4b84 100644 --- a/pkg/machine/wsl/machine.go +++ b/pkg/machine/wsl/machine.go @@ -474,8 +474,8 @@ func (v *MachineVM) writeConfig() error { } func setupConnections(v *MachineVM, opts machine.InitOptions, sshDir string) error { - uri := machine.SSHRemoteConnection.MakeSSHURL("localhost", "/run/user/1000/podman/podman.sock", strconv.Itoa(v.Port), v.RemoteUsername) - uriRoot := machine.SSHRemoteConnection.MakeSSHURL("localhost", "/run/podman/podman.sock", strconv.Itoa(v.Port), "root") + uri := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, "/run/user/1000/podman/podman.sock", strconv.Itoa(v.Port), v.RemoteUsername) + uriRoot := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, "/run/podman/podman.sock", strconv.Itoa(v.Port), "root") identity := filepath.Join(sshDir, v.Name) uris := []url.URL{uri, uriRoot} From 083894a22b41fc52e9380b6ef3e58f79088918a1 Mon Sep 17 00:00:00 2001 From: Paul Holzinger Date: Mon, 22 May 2023 13:30:39 +0200 Subject: [PATCH 09/11] network create/update: allow dns servers comma separated The examples show that --dns-add 8.8.8.8,1.1.1.1 is valid but it fails, fix this by using StringSliceVar which splits at commas. Added tests to ensure it is working. Fixes #18632 Signed-off-by: Paul Holzinger --- cmd/podman/networks/create.go | 2 +- cmd/podman/networks/update.go | 4 +-- test/e2e/run_networking_test.go | 58 +++++++++++++++++++++++++++++++-- 3 files changed, 58 insertions(+), 6 deletions(-) diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go index 626c2d32e1..289d83dec2 100644 --- a/cmd/podman/networks/create.go +++ b/cmd/podman/networks/create.go @@ -85,7 +85,7 @@ func networkCreateFlags(cmd *cobra.Command) { flags.BoolVar(&networkCreateOptions.IgnoreIfExists, "ignore", false, "Don't fail if network already exists") dnsserverFlagName := "dns" - flags.StringArrayVar(&networkCreateOptions.NetworkDNSServers, dnsserverFlagName, nil, "DNS servers this network will use") + flags.StringSliceVar(&networkCreateOptions.NetworkDNSServers, dnsserverFlagName, nil, "DNS servers this network will use") _ = cmd.RegisterFlagCompletionFunc(dnsserverFlagName, completion.AutocompleteNone) } func init() { diff --git a/cmd/podman/networks/update.go b/cmd/podman/networks/update.go index 61d0384538..cd3b8e8422 100644 --- a/cmd/podman/networks/update.go +++ b/cmd/podman/networks/update.go @@ -31,9 +31,9 @@ func networkUpdateFlags(cmd *cobra.Command) { flags := cmd.Flags() addDNSServerFlagName := "dns-add" - flags.StringArrayVar(&networkUpdateOptions.AddDNSServers, addDNSServerFlagName, nil, "add network level nameservers") + flags.StringSliceVar(&networkUpdateOptions.AddDNSServers, addDNSServerFlagName, nil, "add network level nameservers") removeDNSServerFlagName := "dns-drop" - flags.StringArrayVar(&networkUpdateOptions.RemoveDNSServers, removeDNSServerFlagName, nil, "remove network level nameservers") + flags.StringSliceVar(&networkUpdateOptions.RemoveDNSServers, removeDNSServerFlagName, nil, "remove network level nameservers") _ = cmd.RegisterFlagCompletionFunc(addDNSServerFlagName, completion.AutocompleteNone) _ = cmd.RegisterFlagCompletionFunc(removeDNSServerFlagName, completion.AutocompleteNone) } diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go index a9fcbd008f..08ed9c0509 100644 --- a/test/e2e/run_networking_test.go +++ 
From 1575b3a7f12f39a25aa43a41746e69fbc42f0063 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Mon, 15 May 2023 16:37:47 +0200
Subject: [PATCH 08/11] machine: fix default connection URL to use 127.0.0.1

gvproxy listens on 127.0.0.1 [1]; using localhost as the hostname can
result in the client trying to connect to the IPv6 localhost (`::1`).
This fails, as shown in the issue.

This switches the hostname in the system connection to 127.0.0.1 to fix
the problem. I switched the qemu, Hyper-V, and WSL backends. I haven't
touched the applehv code because it uses two different IPs and I am not
sure what the correct thing is there. I leave that to Brent to figure
out.

[NO NEW TESTS NEEDED]

[1] https://github.com/containers/gvisor-tap-vsock/blob/main/cmd/gvproxy/main.go#L197-L199

Fixes #16470

Signed-off-by: Paul Holzinger
---
 pkg/machine/connection.go     | 2 ++
 pkg/machine/hyperv/machine.go | 4 ++--
 pkg/machine/qemu/machine.go   | 4 ++--
 pkg/machine/wsl/machine.go    | 4 ++--
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/pkg/machine/connection.go b/pkg/machine/connection.go
index 93c638cc7d..3273b55829 100644
--- a/pkg/machine/connection.go
+++ b/pkg/machine/connection.go
@@ -10,6 +10,8 @@ import (
 	"github.com/containers/common/pkg/config"
 )

+const LocalhostIP = "127.0.0.1"
+
 func AddConnection(uri fmt.Stringer, name, identity string, isDefault bool) error {
 	if len(identity) < 1 {
 		return errors.New("identity must be defined")
diff --git a/pkg/machine/hyperv/machine.go b/pkg/machine/hyperv/machine.go
index 177dc8c8a1..364abd574b 100644
--- a/pkg/machine/hyperv/machine.go
+++ b/pkg/machine/hyperv/machine.go
@@ -99,8 +99,8 @@ func (m *HyperVMachine) Init(opts machine.InitOptions) (bool, error) {
 	m.IdentityPath = filepath.Join(sshDir, m.Name)

 	if len(opts.IgnitionPath) < 1 {
-		uri := machine.SSHRemoteConnection.MakeSSHURL("localhost", fmt.Sprintf("/run/user/%d/podman/podman.sock", m.UID), strconv.Itoa(m.Port), m.RemoteUsername)
-		uriRoot := machine.SSHRemoteConnection.MakeSSHURL("localhost", "/run/podman/podman.sock", strconv.Itoa(m.Port), "root")
+		uri := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, fmt.Sprintf("/run/user/%d/podman/podman.sock", m.UID), strconv.Itoa(m.Port), m.RemoteUsername)
+		uriRoot := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, "/run/podman/podman.sock", strconv.Itoa(m.Port), "root")
 		identity := filepath.Join(sshDir, m.Name)

 		uris := []url.URL{uri, uriRoot}
diff --git a/pkg/machine/qemu/machine.go b/pkg/machine/qemu/machine.go
index d4a72ed240..3684d3fcae 100644
--- a/pkg/machine/qemu/machine.go
+++ b/pkg/machine/qemu/machine.go
@@ -328,8 +328,8 @@ func (v *MachineVM) Init(opts machine.InitOptions) (bool, error) {
 	v.CmdLine = append(v.CmdLine, "-drive", "if=virtio,file="+v.getImageFile())
 	// This kind of stinks but no other way around this r/n
 	if len(opts.IgnitionPath) < 1 {
-		uri := machine.SSHRemoteConnection.MakeSSHURL("localhost", fmt.Sprintf("/run/user/%d/podman/podman.sock", v.UID), strconv.Itoa(v.Port), v.RemoteUsername)
-		uriRoot := machine.SSHRemoteConnection.MakeSSHURL("localhost", "/run/podman/podman.sock", strconv.Itoa(v.Port), "root")
+		uri := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, fmt.Sprintf("/run/user/%d/podman/podman.sock", v.UID), strconv.Itoa(v.Port), v.RemoteUsername)
+		uriRoot := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, "/run/podman/podman.sock", strconv.Itoa(v.Port), "root")
 		identity := filepath.Join(sshDir, v.Name)

 		uris := []url.URL{uri, uriRoot}
diff --git a/pkg/machine/wsl/machine.go b/pkg/machine/wsl/machine.go
index db14af074a..47a52b4b84 100644
--- a/pkg/machine/wsl/machine.go
+++ b/pkg/machine/wsl/machine.go
@@ -474,8 +474,8 @@ func (v *MachineVM) writeConfig() error {
 }

 func setupConnections(v *MachineVM, opts machine.InitOptions, sshDir string) error {
-	uri := machine.SSHRemoteConnection.MakeSSHURL("localhost", "/run/user/1000/podman/podman.sock", strconv.Itoa(v.Port), v.RemoteUsername)
-	uriRoot := machine.SSHRemoteConnection.MakeSSHURL("localhost", "/run/podman/podman.sock", strconv.Itoa(v.Port), "root")
+	uri := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, "/run/user/1000/podman/podman.sock", strconv.Itoa(v.Port), v.RemoteUsername)
+	uriRoot := machine.SSHRemoteConnection.MakeSSHURL(machine.LocalhostIP, "/run/podman/podman.sock", strconv.Itoa(v.Port), "root")
 	identity := filepath.Join(sshDir, v.Name)

 	uris := []url.URL{uri, uriRoot}
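The localhost pitfall can be reproduced outside podman: on a host whose resolver returns ::1 first for localhost, dialing the name fails while dialing the literal IPv4 address succeeds. A small sketch (the port is an arbitrary example):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// "localhost" may resolve to ::1 first, which gvproxy does not listen on.
	addrs, err := net.LookupHost("localhost")
	fmt.Println("localhost resolves to:", addrs, err)

	// Dialing the literal IPv4 address sidesteps name resolution entirely.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:22", time.Second)
	if err == nil {
		conn.Close()
	}
	fmt.Println("dial 127.0.0.1:22 ->", err)
}
```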
From 083894a22b41fc52e9380b6ef3e58f79088918a1 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Mon, 22 May 2023 13:30:39 +0200
Subject: [PATCH 09/11] network create/update: allow dns servers comma separated

The examples show that --dns-add 8.8.8.8,1.1.1.1 is valid, but it
fails. Fix this by using StringSliceVar, which splits at commas. Added
tests to ensure it works.

Fixes #18632

Signed-off-by: Paul Holzinger
---
 cmd/podman/networks/create.go   |  2 +-
 cmd/podman/networks/update.go   |  4 +--
 test/e2e/run_networking_test.go | 58 +++++++++++++++++++++++++++++++--
 3 files changed, 58 insertions(+), 6 deletions(-)

diff --git a/cmd/podman/networks/create.go b/cmd/podman/networks/create.go
index 626c2d32e1..289d83dec2 100644
--- a/cmd/podman/networks/create.go
+++ b/cmd/podman/networks/create.go
@@ -85,7 +85,7 @@ func networkCreateFlags(cmd *cobra.Command) {
 	flags.BoolVar(&networkCreateOptions.IgnoreIfExists, "ignore", false, "Don't fail if network already exists")

 	dnsserverFlagName := "dns"
-	flags.StringArrayVar(&networkCreateOptions.NetworkDNSServers, dnsserverFlagName, nil, "DNS servers this network will use")
+	flags.StringSliceVar(&networkCreateOptions.NetworkDNSServers, dnsserverFlagName, nil, "DNS servers this network will use")
 	_ = cmd.RegisterFlagCompletionFunc(dnsserverFlagName, completion.AutocompleteNone)
 }

 func init() {
diff --git a/cmd/podman/networks/update.go b/cmd/podman/networks/update.go
index 61d0384538..cd3b8e8422 100644
--- a/cmd/podman/networks/update.go
+++ b/cmd/podman/networks/update.go
@@ -31,9 +31,9 @@ func networkUpdateFlags(cmd *cobra.Command) {
 	flags := cmd.Flags()

 	addDNSServerFlagName := "dns-add"
-	flags.StringArrayVar(&networkUpdateOptions.AddDNSServers, addDNSServerFlagName, nil, "add network level nameservers")
+	flags.StringSliceVar(&networkUpdateOptions.AddDNSServers, addDNSServerFlagName, nil, "add network level nameservers")
 	removeDNSServerFlagName := "dns-drop"
-	flags.StringArrayVar(&networkUpdateOptions.RemoveDNSServers, removeDNSServerFlagName, nil, "remove network level nameservers")
+	flags.StringSliceVar(&networkUpdateOptions.RemoveDNSServers, removeDNSServerFlagName, nil, "remove network level nameservers")
 	_ = cmd.RegisterFlagCompletionFunc(addDNSServerFlagName, completion.AutocompleteNone)
 	_ = cmd.RegisterFlagCompletionFunc(removeDNSServerFlagName, completion.AutocompleteNone)
 }
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index a9fcbd008f..08ed9c0509 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -55,6 +55,7 @@ var _ = Describe("Podman run networking", func() {
 		session = podmanTest.Podman([]string{"network", "inspect", net})
 		session.WaitWithDefaultTimeout()
 		defer podmanTest.removeNetwork(net)
+		Expect(session).Should(Exit(0))
 		var results []types.Network
 		err := json.Unmarshal([]byte(session.OutputToString()), &results)
 		Expect(err).ToNot(HaveOccurred())
@@ -62,8 +63,7 @@ var _ = Describe("Podman run networking", func() {
 		result := results[0]
 		Expect(result.Subnets).To(HaveLen(1))
 		aardvarkDNSGateway := result.Subnets[0].Gateway.String()
-		Expect(session.OutputToString()).To(ContainSubstring("1.1.1.1"))
-		Expect(session).Should(Exit(0))
+		Expect(result.NetworkDNSServers).To(Equal([]string{"1.1.1.1"}))

 		session = podmanTest.Podman([]string{"run", "-d", "--name", "con1", "--network", net, "busybox", "top"})
 		session.WaitWithDefaultTimeout()
@@ -75,7 +75,7 @@ var _ = Describe("Podman run networking", func() {
 		Expect(session.OutputToString()).To(ContainSubstring("Non-authoritative answer: Name: google.com Address:"))

 		// Update to a bad DNS Server
-		session = podmanTest.Podman([]string{"network", "update", net, "--dns-add", "7.7.7.7"})
+		session = podmanTest.Podman([]string{"network", "update", net, "--dns-add", "127.0.0.255"})
 		session.WaitWithDefaultTimeout()
 		Expect(session).Should(Exit(0))

@@ -90,6 +90,58 @@ var _ = Describe("Podman run networking", func() {
 		Expect(session.OutputToString()).To(ContainSubstring(";; connection timed out; no servers could be reached"))
 	})

+	It("podman network dns multiple servers", func() {
+		// Following test is only functional with netavark and aardvark
+		SkipIfCNI(podmanTest)
+		net := createNetworkName("IntTest")
+		session := podmanTest.Podman([]string{"network", "create", net, "--dns", "1.1.1.1,8.8.8.8", "--dns", "8.4.4.8"})
+		session.WaitWithDefaultTimeout()
+		defer podmanTest.removeNetwork(net)
+		Expect(session).Should(Exit(0))
+
+		session = podmanTest.Podman([]string{"network", "inspect", net})
+		session.WaitWithDefaultTimeout()
+		defer podmanTest.removeNetwork(net)
+		Expect(session).Should(Exit(0))
+		var results []types.Network
+		err := json.Unmarshal([]byte(session.OutputToString()), &results)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(results).To(HaveLen(1))
+		result := results[0]
+		Expect(result.Subnets).To(HaveLen(1))
+		aardvarkDNSGateway := result.Subnets[0].Gateway.String()
+		Expect(result.NetworkDNSServers).To(Equal([]string{"1.1.1.1", "8.8.8.8", "8.4.4.8"}))
+
+		session = podmanTest.Podman([]string{"run", "-d", "--name", "con1", "--network", net, "busybox", "top"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		session = podmanTest.Podman([]string{"exec", "con1", "nslookup", "google.com", aardvarkDNSGateway})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+		Expect(session.OutputToString()).To(ContainSubstring("Non-authoritative answer: Name: google.com Address:"))
+
+		// Update DNS server
+		session = podmanTest.Podman([]string{"network", "update", net, "--dns-drop=1.1.1.1,8.8.8.8",
+			"--dns-drop", "8.4.4.8", "--dns-add", "127.0.0.253,127.0.0.254", "--dns-add", "127.0.0.255"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		session = podmanTest.Podman([]string{"network", "inspect", net})
+		session.WaitWithDefaultTimeout()
+		defer podmanTest.removeNetwork(net)
+		Expect(session).Should(Exit(0))
+		err = json.Unmarshal([]byte(session.OutputToString()), &results)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(results).To(HaveLen(1))
+		Expect(results[0].NetworkDNSServers).To(Equal([]string{"127.0.0.253", "127.0.0.254", "127.0.0.255"}))
+
+		session = podmanTest.Podman([]string{"exec", "con1", "nslookup", "google.com", aardvarkDNSGateway})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(1))
+		Expect(session.OutputToString()).To(ContainSubstring(";; connection timed out; no servers could be reached"))
+	})
+
 	It("podman run network connection with default bridge", func() {
 		session := podmanTest.RunContainerWithNetworkTest("")
 		session.WaitWithDefaultTimeout()

From d812087d11c2e6273e796c05ae7e6ea71f0b2df6 Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Wed, 12 Apr 2023 20:35:13 +0200
Subject: [PATCH 10/11] libpod: rootlessNetNs.Cleanup() fix error message

The wrong error was logged.

Signed-off-by: Paul Holzinger
---
 libpod/networking_common.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libpod/networking_common.go b/libpod/networking_common.go
index a569e9eaed..24165e1598 100644
--- a/libpod/networking_common.go
+++ b/libpod/networking_common.go
@@ -115,7 +115,7 @@ func (r *Runtime) teardownNetworkBackend(ns string, opts types.NetworkOptions) e
 		// execute the network setup in the rootless net ns
 		err = rootlessNetNS.Do(tearDownPod)
 		if cerr := rootlessNetNS.Cleanup(r); cerr != nil {
-			logrus.WithError(err).Error("failed to clean up rootless netns")
+			logrus.WithError(cerr).Error("failed to clean up rootless netns")
 		}
 		rootlessNetNS.Lock.Unlock()
 	} else {

From 97ec57da5b8f9b08755afdca46be268c3c2eb2bf Mon Sep 17 00:00:00 2001
From: Paul Holzinger
Date: Mon, 17 Apr 2023 11:28:16 +0200
Subject: [PATCH 11/11] libpod: configureNetNS() tear down on errors

Make sure to tear down the netns again on errors. This is needed when a
later call fails and we have not yet stored the netns in the container
state.

[NO NEW TESTS NEEDED] My ginkgo-v2 PR will catch problems like this
once merged.

Fixes #18205

Signed-off-by: Paul Holzinger
---
 libpod/networking_linux.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 89eb6bbb08..13befa0b37 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -592,6 +592,14 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS string) (status map[strin
 	if err != nil {
 		return nil, err
 	}
+	defer func() {
+		// do not forget to tear down the netns when a later error happens
+		if rerr != nil {
+			if err := r.teardownNetworkBackend(ctrNS, netOpts); err != nil {
+				logrus.Warnf("failed to teardown network after failed setup: %v", err)
+			}
+		}
+	}()

 	// set up rootless port forwarder when rootless with ports and the network status is empty,
 	// if this is called from network reload the network status will not be empty and we should
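For the --dns change in patch 09, the difference between pflag's StringArrayVar and StringSliceVar is the whole bug: the slice variant splits each value at commas and accumulates across repeats, while the array variant keeps each value verbatim. A standalone demonstration:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var slice, array []string
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringSliceVar(&slice, "dns-slice", nil, "splits on commas")
	fs.StringArrayVar(&array, "dns-array", nil, "keeps values verbatim")

	_ = fs.Parse([]string{
		"--dns-slice=1.1.1.1,8.8.8.8", "--dns-slice=8.4.4.8",
		"--dns-array=1.1.1.1,8.8.8.8", "--dns-array=8.4.4.8",
	})

	fmt.Println(slice) // [1.1.1.1 8.8.8.8 8.4.4.8]
	fmt.Println(array) // [1.1.1.1,8.8.8.8 8.4.4.8]
}
```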