Fix permissions on initially created named volumes
The permissions of a newly created named volume should match those of the directory it is mounted on.

Fixes: #10188

Signed-off-by: Daniel J Walsh <[email protected]>
rhatdan committed Jun 14, 2021
1 parent e3dd12a commit 81eb71f
Showing 4 changed files with 92 additions and 59 deletions.
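
The heart of the change, pulled out of libpod as a minimal standalone sketch: copy the mode and timestamps of the directory the volume shadows onto the volume's mount point. The file name, helper name, and paths below are hypothetical, and the atime extraction is Linux-only, like the patched code:

// match_dir_metadata.go: a minimal, Linux-only sketch of the technique
// this commit applies; names and paths are illustrative, not Podman's.
package main

import (
    "fmt"
    "os"
    "syscall"
    "time"
)

// matchDirMetadata makes mountPoint's permissions and timestamps match
// those of dest, the directory the volume is mounted over.
func matchDirMetadata(mountPoint, dest string) error {
    st, err := os.Lstat(dest)
    if err != nil {
        if os.IsNotExist(err) {
            return nil // nothing to match against
        }
        return err
    }
    // |0111 keeps the execute (search) bits set so the directory stays
    // traversable, exactly as the commit does.
    if err := os.Chmod(mountPoint, st.Mode()|0111); err != nil {
        return err
    }
    // os.FileInfo exposes only mtime portably; atime comes from the
    // raw Linux stat structure.
    stat := st.Sys().(*syscall.Stat_t)
    atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
    return os.Chtimes(mountPoint, atime, st.ModTime())
}

func main() {
    // Hypothetical paths: make /tmp/volume look like /tmp/target.
    if err := matchDirMetadata("/tmp/volume", "/tmp/target"); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}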
libpod/container_internal.go: 1 addition & 59 deletions

@@ -1062,7 +1062,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
     }
 
     for _, v := range c.config.NamedVolumes {
-        if err := c.chownVolume(v.Name); err != nil {
+        if err := c.fixVolumePermissions(v); err != nil {
             return err
         }
     }
@@ -1681,64 +1681,6 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
     return vol, nil
 }
 
-// Chown the specified volume if necessary.
-func (c *Container) chownVolume(volumeName string) error {
-    vol, err := c.runtime.state.Volume(volumeName)
-    if err != nil {
-        return errors.Wrapf(err, "error retrieving named volume %s for container %s", volumeName, c.ID())
-    }
-
-    vol.lock.Lock()
-    defer vol.lock.Unlock()
-
-    // The volume may need a copy-up. Check the state.
-    if err := vol.update(); err != nil {
-        return err
-    }
-
-    // TODO: For now, I've disabled chowning volumes owned by non-Podman
-    // drivers. This may be safe, but it's really going to be a case-by-case
-    // thing, I think - safest to leave disabled now and re-enable later if
-    // there is a demand.
-    if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
-        vol.state.NeedsChown = false
-
-        uid := int(c.config.Spec.Process.User.UID)
-        gid := int(c.config.Spec.Process.User.GID)
-
-        if c.config.IDMappings.UIDMap != nil {
-            p := idtools.IDPair{
-                UID: uid,
-                GID: gid,
-            }
-            mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
-            newPair, err := mappings.ToHost(p)
-            if err != nil {
-                return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
-            }
-            uid = newPair.UID
-            gid = newPair.GID
-        }
-
-        vol.state.UIDChowned = uid
-        vol.state.GIDChowned = gid
-
-        if err := vol.save(); err != nil {
-            return err
-        }
-
-        mountPoint, err := vol.MountPoint()
-        if err != nil {
-            return err
-        }
-
-        if err := os.Lchown(mountPoint, uid, gid); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
 // cleanupStorage unmounts and cleans up the container's root filesystem
 func (c *Container) cleanupStorage() error {
     if !c.state.Mounted {
libpod/container_internal_linux.go: 74 additions & 0 deletions

@@ -2426,3 +2426,77 @@ func (c *Container) createSecretMountDir() error {
 
     return err
 }
+
+// Fix ownership and permissions of the specified volume if necessary.
+func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
+    vol, err := c.runtime.state.Volume(v.Name)
+    if err != nil {
+        return errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
+    }
+
+    vol.lock.Lock()
+    defer vol.lock.Unlock()
+
+    // The volume may need a copy-up. Check the state.
+    if err := vol.update(); err != nil {
+        return err
+    }
+
+    // TODO: For now, I've disabled chowning volumes owned by non-Podman
+    // drivers. This may be safe, but it's really going to be a case-by-case
+    // thing, I think - safest to leave disabled now and re-enable later if
+    // there is a demand.
+    if vol.state.NeedsChown && !vol.UsesVolumeDriver() {
+        vol.state.NeedsChown = false
+
+        uid := int(c.config.Spec.Process.User.UID)
+        gid := int(c.config.Spec.Process.User.GID)
+
+        if c.config.IDMappings.UIDMap != nil {
+            p := idtools.IDPair{
+                UID: uid,
+                GID: gid,
+            }
+            mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
+            newPair, err := mappings.ToHost(p)
+            if err != nil {
+                return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
+            }
+            uid = newPair.UID
+            gid = newPair.GID
+        }
+
+        vol.state.UIDChowned = uid
+        vol.state.GIDChowned = gid
+
+        if err := vol.save(); err != nil {
+            return err
+        }
+
+        mountPoint, err := vol.MountPoint()
+        if err != nil {
+            return err
+        }
+
+        if err := os.Lchown(mountPoint, uid, gid); err != nil {
+            return err
+        }
+
+        // Make sure the new volume matches the permissions of the target directory.
+        // https://github.com/containers/podman/issues/10188
+        st, err := os.Lstat(filepath.Join(c.state.Mountpoint, v.Dest))
+        if err == nil {
+            if err := os.Chmod(mountPoint, st.Mode()|0111); err != nil {
+                return err
+            }
+            stat := st.Sys().(*syscall.Stat_t)
+            atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+            if err := os.Chtimes(mountPoint, atime, st.ModTime()); err != nil {
+                return err
+            }
+        } else if !os.IsNotExist(err) {
+            return err
+        }
+    }
+    return nil
+}
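
Inside a user namespace, the IDs written by os.Lchown must be host-side IDs, so the function first pushes the container's uid:gid through the container's ID mappings. A self-contained sketch of that step using containers/storage's idtools, with illustrative mapping values (a typical rootless layout, not taken from the commit):

// idmap_sketch.go: how a container uid:gid pair maps to host IDs.
package main

import (
    "fmt"

    "github.com/containers/storage/pkg/idtools"
)

func main() {
    // Illustrative rootless-style mapping: container IDs 0-65535
    // mapped onto host IDs starting at 100000.
    uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
    gidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
    mappings := idtools.NewIDMappingsFromMaps(uidMap, gidMap)

    // Container user 1000:1000 becomes host 101000:101000, the pair
    // that os.Lchown on the volume mount point must receive.
    newPair, err := mappings.ToHost(idtools.IDPair{UID: 1000, GID: 1000})
    if err != nil {
        panic(err)
    }
    fmt.Printf("host uid=%d gid=%d\n", newPair.UID, newPair.GID)
}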
libpod/container_internal_unsupported.go: 5 additions & 0 deletions

@@ -57,3 +57,8 @@ func (c *Container) reloadNetwork() error {
 func (c *Container) getUserOverrides() *lookup.Overrides {
     return nil
 }
+
+// Fix ownership and permissions of the specified volume if necessary.
+func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
+    return define.ErrNotImplemented
+}
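
fixVolumePermissions is defined once per platform: container_internal_linux.go carries the real logic, while this file, selected by a build constraint, satisfies the compiler on every other OS and reports define.ErrNotImplemented. A minimal sketch of that pattern, with hypothetical file, package, and function names:

// perms_unsupported.go: compiled only when the target OS is not Linux.
// +build !linux

package example

import "errors"

// errNotImplemented stands in for libpod's define.ErrNotImplemented.
var errNotImplemented = errors.New("not implemented")

// fixPerms is the cross-platform stub; a perms_linux.go file guarded
// by "+build linux" would hold the real implementation.
func fixPerms() error {
    return errNotImplemented
}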
test/e2e/run_test.go: 12 additions & 0 deletions

@@ -904,6 +904,18 @@ USER bin`, BB)
         Expect(session.ExitCode()).To(Equal(100))
     })
 
+    It("podman run with named volume", func() {
+        session := podmanTest.Podman([]string{"run", "--rm", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"})
+        session.WaitWithDefaultTimeout()
+        Expect(session.ExitCode()).To(Equal(0))
+        perms := session.OutputToString()
+
+        session = podmanTest.Podman([]string{"run", "--rm", "-v", "test:/var/tmp", ALPINE, "stat", "-c", "%a %Y", "/var/tmp"})
+        session.WaitWithDefaultTimeout()
+        Expect(session.ExitCode()).To(Equal(0))
+        Expect(session.OutputToString()).To(Equal(perms))
+    })
+
     It("podman run with built-in volume image", func() {
         session := podmanTest.Podman([]string{"run", "--rm", redis, "ls"})
         session.WaitWithDefaultTimeout()
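
The new e2e test pins the fix down: stat -c "%a %Y" prints a path's access mode in octal and its mtime in epoch seconds, so the run that mounts the freshly created named volume "test" over /var/tmp must report exactly the same values as the baseline run without the volume.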
