From b0dc157af9f50f1d9b6d89750e73d496bc6ca730 Mon Sep 17 00:00:00 2001
From: Daniel J Walsh
Date: Wed, 2 Jun 2021 05:28:26 -0400
Subject: [PATCH] Fix permissions on initially created named volumes

The permissions of a named volume should match those of the directory it
is being mounted on.

Fixes: https://github.com/containers/podman/issues/10188

Signed-off-by: Daniel J Walsh
---
 libpod/container_internal.go             | 60 +------------------
 libpod/container_internal_linux.go       | 74 ++++++++++++++++++++++++
 libpod/container_internal_unsupported.go |  5 ++
 test/e2e/run_test.go                     | 12 ++++
 4 files changed, 92 insertions(+), 59 deletions(-)

diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 17921fbfbf..6cc4844935 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -1061,7 +1061,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
 	}
 
 	for _, v := range c.config.NamedVolumes {
-		if err := c.chownVolume(v.Name); err != nil {
+		if err := c.fixVolumePermissions(v); err != nil {
 			return err
 		}
 	}
@@ -1680,64 +1680,6 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
 	return vol, nil
 }
 
-// Chown the specified volume if necessary.
-func (c *Container) chownVolume(volumeName string) error {
-	vol, err := c.runtime.state.Volume(volumeName)
-	if err != nil {
-		return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
-	}
-
-	vol.lock.Lock()
-	defer vol.lock.Unlock()
-
-	// The volume may need a copy-up. Check the state.
-	if err := vol.update(); err != nil {
-		return err
-	}
-
-	// TODO: For now, I've disabled chowning volumes owned by non-Podman
-	// drivers. This may be safe, but it's really going to be a case-by-case
-	// thing, I think - safest to leave disabled now and re-enable later if
-	// there is a demand.
-	if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
-		vol.state.NeedsChown = false
-
-		uid := int(c.config.Spec.Process.User.UID)
-		gid := int(c.config.Spec.Process.User.GID)
-
-		if c.config.IDMappings.UIDMap != nil {
-			p := idtools.IDPair{
-				UID: uid,
-				GID: gid,
-			}
-			mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
-			newPair, err := mappings.ToHost(p)
-			if err != nil {
-				return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
-			}
-			uid = newPair.UID
-			gid = newPair.GID
-		}
-
-		vol.state.UIDChowned = uid
-		vol.state.GIDChowned = gid
-
-		if err := vol.save(); err != nil {
-			return err
-		}
-
-		mountPoint, err := vol.MountPoint()
-		if err != nil {
-			return err
-		}
-
-		if err := os.Lchown(mountPoint, uid, gid); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // cleanupStorage unmounts and cleans up the container's root filesystem
 func (c *Container) cleanupStorage() error {
 	if !c.state.Mounted {
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 30eb36cd66..b31d90a6e1 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -2428,3 +2428,77 @@ func (c *Container) createSecretMountDir() error {
 
 	return err
 }
+
+// Fix ownership and permissions of the specified volume if necessary.
+func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
+	vol, err := c.runtime.state.Volume(v.Name)
+	if err != nil {
+		return errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+	}
+
+	vol.lock.Lock()
+	defer vol.lock.Unlock()
+
+	// The volume may need a copy-up. Check the state.
+	if err := vol.update(); err != nil {
+		return err
+	}
+
+	// TODO: For now, I've disabled chowning volumes owned by non-Podman
+	// drivers. This may be safe, but it's really going to be a case-by-case
+	// thing, I think - safest to leave disabled now and re-enable later if
+	// there is a demand.
+	if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
+		vol.state.NeedsChown = false
+
+		uid := int(c.config.Spec.Process.User.UID)
+		gid := int(c.config.Spec.Process.User.GID)
+
+		if c.config.IDMappings.UIDMap != nil {
+			p := idtools.IDPair{
+				UID: uid,
+				GID: gid,
+			}
+			mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
+			newPair, err := mappings.ToHost(p)
+			if err != nil {
+				return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
+			}
+			uid = newPair.UID
+			gid = newPair.GID
+		}
+
+		vol.state.UIDChowned = uid
+		vol.state.GIDChowned = gid
+
+		if err := vol.save(); err != nil {
+			return err
+		}
+
+		mountPoint, err := vol.MountPoint()
+		if err != nil {
+			return err
+		}
+
+		if err := os.Lchown(mountPoint, uid, gid); err != nil {
+			return err
+		}
+
+		// Make sure the new volume matches the permissions of the target directory.
+		// https://github.com/containers/podman/issues/10188
+		st, err := os.Lstat(filepath.Join(c.state.Mountpoint, v.Dest))
+		if err == nil {
+			if err := os.Chmod(mountPoint, st.Mode()|0111); err != nil {
+				return err
+			}
+			stat := st.Sys().(*syscall.Stat_t)
+			atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+			if err := os.Chtimes(mountPoint, atime, st.ModTime()); err != nil {
+				return err
+			}
+		} else if !os.IsNotExist(err) {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/libpod/container_internal_unsupported.go b/libpod/container_internal_unsupported.go
index f979bcbdec..125329ce58 100644
--- a/libpod/container_internal_unsupported.go
+++ b/libpod/container_internal_unsupported.go
@@ -57,3 +57,8 @@ func (c *Container) reloadNetwork() error {
 func (c *Container) getUserOverrides() *lookup.Overrides {
 	return nil
 }
+
+// Fix ownership and permissions of the specified volume if necessary.
+func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
+	return define.ErrNotImplemented
+}
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index f27ded5d26..174714cacf 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -904,6 +904,18 @@ USER bin`, BB)
 		Expect(session.ExitCode()).To(Equal(100))
 	})
 
+	It("podman run with named volume", func() {
+		session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+		perms := session.OutputToString()
+
+		session = podmanTest.Podman([]string{"run", "--rm", "-v", "test:/var/tmp", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"})
+		session.WaitWithDefaultTimeout()
+		Expect(session.ExitCode()).To(Equal(0))
+		Expect(session.OutputToString()).To(Equal(perms))
+	})
+
 	It("podman run with built-in volume image", func() {
 		session := podmanTest.Podman([]string{"run", "--rm", redis, "ls"})
 		session.WaitWithDefaultTimeout()
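
Note (not part of the patch): the heart of the fix is the os.Lstat/os.Chmod/os.Chtimes
sequence added to fixVolumePermissions, which copies the mode and timestamps of the
directory the volume shadows onto the volume's mount point. The standalone Go sketch
below shows only that sequence; the function name matchVolumePermissions and the paths
in main are illustrative placeholders, not Podman API.

// Standalone sketch of the permission-copy step; names and paths are hypothetical,
// only the Lstat/Chmod/Chtimes logic mirrors the patch above. Linux-only, since it
// relies on syscall.Stat_t exposing Atim.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
	"time"
)

// matchVolumePermissions copies the mode and timestamps of the directory a volume is
// mounted over (dest, resolved inside the container root at ctrMountpoint) onto the
// volume's own mount point (volMountPoint).
func matchVolumePermissions(ctrMountpoint, dest, volMountPoint string) error {
	st, err := os.Lstat(filepath.Join(ctrMountpoint, dest))
	if err != nil {
		if os.IsNotExist(err) {
			// The image has no directory at dest; nothing to copy.
			return nil
		}
		return err
	}
	// Copy the mode, forcing the search bits on, as the patch does with st.Mode()|0111.
	if err := os.Chmod(volMountPoint, st.Mode()|0111); err != nil {
		return err
	}
	// Copy access and modification times from the original directory.
	stat := st.Sys().(*syscall.Stat_t)
	atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
	return os.Chtimes(volMountPoint, atime, st.ModTime())
}

func main() {
	// Illustrative paths only; in Podman the real values come from the container's
	// state (c.state.Mountpoint, v.Dest) and the volume's MountPoint().
	if err := matchVolumePermissions("/tmp/ctr-root", "var/tmp", "/tmp/volume-data"); err != nil {
		fmt.Println("error:", err)
	}
}

OR-ing in 0111 keeps the mount point traversable even if the copied mode lacked some
search bits, while the timestamp copy is what the new e2e test observes via
stat -c "%a %Y" with and without the named volume mounted.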