Skip to content

Commit

Permalink
Add Snapshot and Clone Support for Multi-Available Zone (#365)
Browse files Browse the repository at this point in the history
* Add snapshot check and topology add during zone volume creation

* Add topology checks for clones

* Add zone snapshot and restore e2e test

* Address failed PR checks

* Update README

* Added e2e tests for clones

* Add snap and clone test (#371)

* Address PR comments

---------

Co-authored-by: Trevor Dawe <[email protected]>
Co-authored-by: Bharath Sreekanth <[email protected]>
  • Loading branch information
3 people committed Dec 9, 2024
1 parent 94c8b3f commit 21c9dfd
Show file tree
Hide file tree
Showing 8 changed files with 424 additions and 14 deletions.
44 changes: 39 additions & 5 deletions service/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,23 @@ func (s *service) CreateVolume(

// Handle Zone topology, which happens when node is annotated with a matching zone label
if len(zoneTargetMap) != 0 && accessibility != nil && len(accessibility.GetPreferred()) > 0 {
contentSource := req.GetVolumeContentSource()
var sourceSystemID string
if contentSource != nil {
Log.Infof("[CreateVolume] Zone volume has a content source - we are a snapshot or clone: %+v", contentSource)

snapshotSource := contentSource.GetSnapshot()
cloneSource := contentSource.GetVolume()

if snapshotSource != nil {
sourceSystemID = s.getSystemIDFromCsiVolumeID(snapshotSource.SnapshotId)
Log.Infof("[CreateVolume] Zone snapshot source systemID: %s", sourceSystemID)
} else if cloneSource != nil {
sourceSystemID = s.getSystemIDFromCsiVolumeID(cloneSource.VolumeId)
Log.Infof("[CreateVolume] Zone clone source systemID: %s", sourceSystemID)
}
}

for _, topo := range accessibility.GetPreferred() {
for topoLabel, zoneName := range topo.Segments {
Log.Infof("Zoning based on label %s", s.opts.zoneLabelKey)
Expand All @@ -231,12 +248,14 @@ func (s *service) CreateVolume(
continue
}

if sourceSystemID != "" && zoneTarget.systemID != sourceSystemID {
continue
}

protectionDomain = string(zoneTarget.protectionDomain)
storagePool = string(zoneTarget.pool)
systemID = zoneTarget.systemID

Log.Infof("Preferred topology zone %s, systemID %s, protectionDomain %s, and storagePool %s", zoneName, systemID, protectionDomain, storagePool)

if err := s.requireProbe(ctx, systemID); err != nil {
Log.Errorln("Failed to probe system " + systemID)
continue
Expand All @@ -247,6 +266,8 @@ func (s *service) CreateVolume(
Segments: systemSegments,
})

// We found a zone topology
Log.Infof("Preferred topology zone %s, systemID %s, protectionDomain %s, and storagePool %s", zoneName, systemID, protectionDomain, storagePool)
zoneTopology = true
}
}
Expand Down Expand Up @@ -552,13 +573,26 @@ func (s *service) CreateVolume(
if contentSource != nil {
volumeSource := contentSource.GetVolume()
if volumeSource != nil {
Log.Printf("volume %s specified as volume content source", volumeSource.VolumeId)
return s.Clone(req, volumeSource, name, size, storagePool)
cloneResponse, err := s.Clone(req, volumeSource, name, size, storagePool)
if err != nil {
return nil, err
}

cloneResponse.Volume.AccessibleTopology = volumeTopology

return cloneResponse, nil
}
snapshotSource := contentSource.GetSnapshot()
if snapshotSource != nil {
Log.Printf("snapshot %s specified as volume content source", snapshotSource.SnapshotId)
return s.createVolumeFromSnapshot(req, snapshotSource, name, size, storagePool)
snapshotVolumeResponse, err := s.createVolumeFromSnapshot(req, snapshotSource, name, size, storagePool)
if err != nil {
return nil, err
}

snapshotVolumeResponse.Volume.AccessibleTopology = volumeTopology

return snapshotVolumeResponse, nil
}
}

Expand Down
26 changes: 26 additions & 0 deletions service/features/service.feature
Original file line number Diff line number Diff line change
Expand Up @@ -1582,3 +1582,29 @@ Feature: VxFlex OS CSI interface
| config |
| "multi_az" |
| "multi_az_custom_labels" |

Scenario: Snapshot a single volume in zone
Given a VxFlexOS service
And I use config <config>
When I call Probe
And I call CreateVolume "volume1" with zones
And a valid CreateVolumeResponse is returned
And I call CreateSnapshot <name>
Then a valid CreateSnapshotResponse is returned
And I call Create Volume for zones from Snapshot <name>
Then a valid CreateVolumeResponse is returned
Examples:
| name | config | errorMsg |
| "snap1" | "multi_az" | "none" |

Scenario: Clone a single volume in zone
Given a VxFlexOS service
And I use config <config>
When I call Probe
And I call CreateVolume <name> with zones
And a valid CreateVolumeResponse is returned
And I call Clone volume for zones <name>
Then a valid CreateVolumeResponse is returned
Examples:
| name | config | errorMsg |
| "volume1" | "multi_az" | "none" |
40 changes: 38 additions & 2 deletions service/step_defs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3815,6 +3815,39 @@ func (f *feature) iCallCreateVolumeFromSnapshot() error {
return nil
}

// iCallCreateVolumeForZonesFromSnapshot issues a CreateVolume request whose
// content source is the given snapshot ID, exercising the zone-aware
// restore-from-snapshot path in the controller. The error, if any, is stored
// on the feature struct for later assertion steps; this step itself never fails.
func (f *feature) iCallCreateVolumeForZonesFromSnapshot(snapshotID string) error {
	ctx := new(context.Context)
	req := getZoneEnabledRequest(f.service.opts.zoneLabelKey)
	// Fix: the name previously carried a trailing space ("volumeForZonesFromSnap "),
	// which produces a volume name that is hard to match and likely unintended.
	req.Name = "volumeForZonesFromSnap"

	// Mark the request as a restore from the given snapshot.
	source := &csi.VolumeContentSource_SnapshotSource{SnapshotId: snapshotID}
	req.VolumeContentSource = new(csi.VolumeContentSource)
	req.VolumeContentSource.Type = &csi.VolumeContentSource_Snapshot{Snapshot: source}
	f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req)
	if f.err != nil {
		fmt.Printf("Error on CreateVolume for zones from snap: %s\n", f.err.Error())
	}
	return nil
}

// iCallCloneVolumeForZones issues a CreateVolume request whose content source
// is an existing volume, exercising the zone-aware clone path in the
// controller. Any error is recorded on the feature struct for later
// assertion steps; the step itself always returns nil.
func (f *feature) iCallCloneVolumeForZones(volumeID string) error {
	ctx := new(context.Context)
	req := getZoneEnabledRequest(f.service.opts.zoneLabelKey)
	req.Name = "clone"

	// Mark the request as a clone of the given source volume.
	req.VolumeContentSource = &csi.VolumeContentSource{
		Type: &csi.VolumeContentSource_Volume{
			Volume: &csi.VolumeContentSource_VolumeSource{VolumeId: volumeID},
		},
	}
	// NOTE(review): this replaces whatever topology getZoneEnabledRequest set
	// with an empty requirement — confirm that is intentional for this scenario.
	req.AccessibilityRequirements = new(csi.TopologyRequirement)
	fmt.Printf("CallCloneVolumeForZones with request = %v", req)

	f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req)
	if f.err != nil {
		fmt.Printf("Error on CreateVolume from volume: %s\n", f.err.Error())
	}

	return nil
}

func (f *feature) iCallCreateVolumeFromSnapshotNFS() error {
ctx := new(context.Context)
req := getTypicalNFSCreateVolumeRequest()
Expand Down Expand Up @@ -4688,6 +4721,7 @@ func (f *feature) iCallPingNASServer(systemID string, name string) error {
func getZoneEnabledRequest(zoneLabelName string) *csi.CreateVolumeRequest {
req := new(csi.CreateVolumeRequest)
params := make(map[string]string)
params["storagepool"] = "viki_pool_HDD_20181031"
req.Parameters = params
capacityRange := new(csi.CapacityRange)
capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024
Expand Down Expand Up @@ -4718,11 +4752,11 @@ func (f *feature) iCallCreateVolumeWithZones(name string) error {
req := f.createVolumeRequest
req.Name = name

fmt.Println("I am in iCallCreateVolume fn.....")
fmt.Printf("I am in iCallCreateVolume with zones fn with req => ..... %v ...", req)

f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req)
if f.err != nil {
log.Printf("CreateVolume called failed: %s\n", f.err.Error())
log.Printf("CreateVolume with zones called failed: %s\n", f.err.Error())
}

if f.createVolumeResponse != nil {
Expand Down Expand Up @@ -4905,6 +4939,8 @@ func FeatureContext(s *godog.ScenarioContext) {
s.Step(`^I call DeleteSnapshot NFS$`, f.iCallDeleteSnapshotNFS)
s.Step(`^a valid snapshot consistency group$`, f.aValidSnapshotConsistencyGroup)
s.Step(`^I call Create Volume from Snapshot$`, f.iCallCreateVolumeFromSnapshot)
s.Step(`^I call Create Volume for zones from Snapshot "([^"]*)"$`, f.iCallCreateVolumeForZonesFromSnapshot)
s.Step(`^I call Clone volume for zones "([^"]*)"$`, f.iCallCloneVolumeForZones)
s.Step(`^I call Create Volume from SnapshotNFS$`, f.iCallCreateVolumeFromSnapshotNFS)
s.Step(`^the wrong capacity$`, f.theWrongCapacity)
s.Step(`^the wrong storage pool$`, f.theWrongStoragePool)
Expand Down
1 change: 1 addition & 0 deletions test/e2e/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,4 @@ The following tests are implemented and run during the `zone-e2e` test.

1. Creates a stateful set of 7 replicas and ensures that everything is up and ready with the zone configuration.
2. Cordons a node (marks it as unschedulable), creates 7 volumes/pods, and ensures that none gets scheduled on the cordoned node.
3. Creates a stateful set of 7 replicas, creates a snapshot and a restore pod for each, and ensures that they are all running.
Loading

0 comments on commit 21c9dfd

Please sign in to comment.