[release-1.24] fix: respect ReadOnly AccessMode in volume mount #1537

Merged
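
This backport to release-1.24 makes the blob CSI node driver honor read-only CSI access modes at mount time. A new helper, isReadOnlyFromCapability, reports whether the requested volume capability carries one of the CSI reader-only access modes, and NodeStageVolume then appends "ro" (for NFS mounts) or "-o ro" (for blobfuse) to the mount options. The change ships with unit tests for the helper and two new e2e tests that provision blobfuse and NFS volumes through a ReadOnlyMany PVC.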
9 changes: 9 additions & 0 deletions pkg/blob/blob.go
@@ -1096,3 +1096,12 @@ func isSupportedFSGroupChangePolicy(policy string) bool {
}
return false
}

// isReadOnlyFromCapability reports whether the requested CSI access mode is one of the reader-only modes.
func isReadOnlyFromCapability(vc *csi.VolumeCapability) bool {
if vc.GetAccessMode() == nil {
return false
}
mode := vc.GetAccessMode().GetMode()
return (mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY)
}
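
The nil check is defensive: a capability that carries only an AccessType and no AccessMode (a case the unit tests below cover) is treated as writable instead of panicking in the node server. Only the two CSI reader-only modes map to a read-only mount; every other mode, including SINGLE_NODE_WRITER, leaves the volume writable.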
57 changes: 57 additions & 0 deletions pkg/blob/blob_test.go
@@ -29,6 +29,7 @@ import (
"time"

"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/stretchr/testify/assert"
"go.uber.org/mock/gomock"
"golang.org/x/sync/errgroup"
@@ -1822,3 +1823,59 @@ func TestIsSupportedFSGroupChangePolicy(t *testing.T) {
}
}
}

func TestIsReadOnlyFromCapability(t *testing.T) {
testCases := []struct {
name string
vc *csi.VolumeCapability
expectedResult bool
}{
{
name: "false with empty capabilities",
vc: &csi.VolumeCapability{},
expectedResult: false,
},
{
name: "false with access type but no access mode",
vc: &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
},
expectedResult: false,
},
{
name: "false with SINGLE_NODE_WRITER capabilities",
vc: &csi.VolumeCapability{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
},
expectedResult: false,
},
{
name: "true with MULTI_NODE_READER_ONLY capabilities",
vc: &csi.VolumeCapability{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY,
},
},
expectedResult: true,
},
{
name: "true with SINGLE_NODE_READER_ONLY capabilities",
vc: &csi.VolumeCapability{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
},
},
expectedResult: true,
},
}

for _, test := range testCases {
result := isReadOnlyFromCapability(test.vc)
if result != test.expectedResult {
t.Errorf("case(%s): isReadOnlyFromCapability returned %v, expected %v", test.name, result, test.expectedResult)
}
}
}
11 changes: 10 additions & 1 deletion pkg/blob/nodeserver.go
@@ -340,6 +340,15 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe
serverAddress = fmt.Sprintf("%s.blob.%s", accountName, storageEndpointSuffix)
}

if isReadOnlyFromCapability(volumeCapability) {
if isNFSProtocol(protocol) {
mountFlags = util.JoinMountOptions(mountFlags, []string{"ro"})
} else {
mountFlags = util.JoinMountOptions(mountFlags, []string{"-o ro"})
}
klog.V(2).Infof("CSI volume is read-only, mounting with extra option ro")
}

if isNFSProtocol(protocol) {
klog.V(2).Infof("target %v\nprotocol %v\n\nvolumeId %v\nmountflags %v\nserverAddress %v",
targetPath, protocol, volumeID, mountFlags, serverAddress)
@@ -412,7 +421,7 @@ func (d *Driver) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRe
args = args + " " + opt
}

klog.V(2).Infof("target %v\nprotocol %v\n\nvolumeId %v\nountflags %v mountOptions %v volumeMountGroup %s\nargs %v\nserverAddress %v",
klog.V(2).Infof("target %v protocol %v volumeId %v\nmountflags %v\nmountOptions %v volumeMountGroup %s\nargs %v\nserverAddress %v",
targetPath, protocol, volumeID, mountFlags, mountOptions, volumeMountGroup, args, serverAddress)

authEnv = append(authEnv, "AZURE_STORAGE_ACCOUNT="+accountName, "AZURE_STORAGE_BLOB_ENDPOINT="+serverAddress)
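The two spellings of the option reflect the two mount paths: NFS volumes go through the kernel mount path, where "ro" is a standard mount flag, while blobfuse runs as a separate FUSE process whose invocation is assembled from an args string, hence the FUSE-style "-o ro". The options are merged with util.JoinMountOptions rather than a plain append, presumably so an option the user already supplied is not duplicated. A minimal sketch of that assumed join-with-dedup behavior (not the driver's actual util code):

	func joinMountOptions(existing, extra []string) []string {
		// Track options already present so "ro" is only added once.
		seen := make(map[string]struct{}, len(existing))
		for _, o := range existing {
			seen[o] = struct{}{}
		}
		for _, o := range extra {
			if _, ok := seen[o]; !ok {
				existing = append(existing, o)
			}
		}
		return existing
	}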
61 changes: 60 additions & 1 deletion test/e2e/dynamic_provisioning_test.go
@@ -205,7 +205,6 @@ var _ = ginkgo.Describe("[blob-csi-e2e] Dynamic Provisioning", func() {
Cmd: "touch /mnt/test-1/data",
Volumes: []testsuites.VolumeDetails{
{
-FSType: "ext4",
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
@@ -227,6 +226,66 @@
test.Run(ctx, cs, ns)
})

ginkgo.It("should create a blobfuse volume on demand and mount it as readOnly when volume access mode is readonly", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "touch /mnt/test-1/data",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
},
},
},
}
test := testsuites.DynamicallyProvisionedReadOnlyVolumeTest{
CSIDriver: testDriver,
Pods: pods,
StorageClassParameters: map[string]string{"skuName": "Standard_GRS"},
}
if isAzureStackCloud {
test.StorageClassParameters = map[string]string{"skuName": "Standard_LRS"}
}
test.Run(ctx, cs, ns)
})

ginkgo.It("should create a nfs volume on demand and mount it as readOnly when volume access mode is readonly", func(ctx ginkgo.SpecContext) {
if isAzureStackCloud {
ginkgo.Skip("test case is not available for Azure Stack")
}
pods := []testsuites.PodDetails{
{
Cmd: "touch /mnt/test-1/data",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
MountOptions: []string{
"nconnect=8",
},
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
},
},
},
}
test := testsuites.DynamicallyProvisionedReadOnlyVolumeTest{
CSIDriver: testDriver,
Pods: pods,
StorageClassParameters: map[string]string{
"skuName": "Premium_LRS",
"protocol": "nfs",
},
}
test.Run(ctx, cs, ns)
})
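
Both read-only tests reuse the write command from the read-write case above ("touch /mnt/test-1/data"); the DynamicallyProvisionedReadOnlyVolumeTest suite is presumably what asserts that the write is rejected by the read-only mount. The NFS variant also keeps a user-supplied mount option (nconnect=8), which exercises the injected ro flag coexisting with existing options.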

ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
6 changes: 6 additions & 0 deletions test/e2e/testsuites/specs.go
@@ -55,6 +55,7 @@ type VolumeDetails struct {
StorageClass *storagev1.StorageClass
NodeStageSecretRef string
Attrib map[string]string
AccessModes []v1.PersistentVolumeAccessMode
}

type VolumeMode int
@@ -196,6 +197,11 @@ func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(ctx context.Conte
} else {
tpvc = NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass)
}

if len(volume.AccessModes) > 0 {
ginkgo.By("setting up the PVC with access modes")
tpvc.AccessModes = volume.AccessModes
}
tpvc.Create(ctx)
cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
// PV will not be ready until PVC is used in a pod when volumeBindingMode: WaitForFirstConsumer
5 changes: 5 additions & 0 deletions test/e2e/testsuites/testsuites.go
@@ -123,6 +123,7 @@ type TestPersistentVolumeClaim struct {
persistentVolumeClaim *v1.PersistentVolumeClaim
requestedPersistentVolumeClaim *v1.PersistentVolumeClaim
dataSource *v1.TypedLocalObjectReference
AccessModes []v1.PersistentVolumeAccessMode
}

func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
@@ -163,6 +164,10 @@ func (t *TestPersistentVolumeClaim) Create(ctx context.Context) {
storageClassName = t.storageClass.Name
}
t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource)
if len(t.AccessModes) > 0 {
ginkgo.By("setting access modes")
t.requestedPersistentVolumeClaim.Spec.AccessModes = t.AccessModes
}
t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(ctx, t.requestedPersistentVolumeClaim, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
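
When AccessModes is left empty the claim keeps whatever generatePVC produces by default (presumably ReadWriteOnce), so existing tests are unaffected; only tests that set the new field, such as the two read-only cases above, request a ReadOnlyMany claim.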