Merge branch 'openshift:oadp-1.4' into oadp-1.4

msfrucht authored Jul 31, 2024
2 parents f3dbd8d + c035379 commit 38a6290

Showing 34 changed files with 881 additions and 165 deletions.
1 change: 1 addition & 0 deletions changelogs/unreleased/7863-kaovilai
@@ -0,0 +1 @@
Requeue backup/restore in their respective controllers to unstick the InProgress status without requiring a velero pod restart.
1 change: 1 addition & 0 deletions changelogs/unreleased/7926-sseago
@@ -0,0 +1 @@
Don't consider unschedulable pods unrecoverable
1 change: 1 addition & 0 deletions changelogs/unreleased/7944-blackpiglet
@@ -0,0 +1 @@
Expose the VolumeHelper to third-party plugins.
1 change: 1 addition & 0 deletions changelogs/unreleased/7949-Lyndon-Li
@@ -0,0 +1 @@
Fix issue #7904: add a limitation clarification for the change PVC selected-node feature
1 change: 1 addition & 0 deletions changelogs/unreleased/7976-blackpiglet
@@ -0,0 +1 @@
Check whether the volume's source is a PVC before fetching its PV.
1 change: 1 addition & 0 deletions changelogs/unreleased/7998-blackpiglet
@@ -0,0 +1 @@
Check whether the namespaces specified in the namespace filter exist.
1 change: 1 addition & 0 deletions changelogs/unreleased/8006-shubham-pampattiwar
@@ -0,0 +1 @@
Skip the PV patch step in the Restore workflow for PVCs in Pending state with WaitForFirstConsumer VolumeBindingMode
1 change: 1 addition & 0 deletions changelogs/unreleased/8016-sseago
@@ -0,0 +1 @@
Reuse existing plugin manager for get/put volume info
@@ -76,7 +76,7 @@ volumePolicies:
- Update VolumePolicy action type validation to account for `fs-backup` and `snapshot` as valid VolumePolicy actions.
- Modifications needed for `fs-backup` action:
  - Based on the volume policy specified on the backup request, we now decide whether to use the legacy pod-annotations approach or the newer volume-policy-based `fs-backup` action approach (see the sketch after this list).
  - If a volume policy (`fs-backup`/`snapshot`) on the backup request matches as an action for a volume, we use the newer volume policy approach to get the list of volumes for the `fs-backup` action.
  - Else, we continue with the legacy annotation-based workflow.
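
A minimal Go sketch of that decision flow (illustrative only; `GetMatchAction`, `resourcepolicies.FSBackup`, and the `Policies` type are from this package, while the wrapper function itself is hypothetical):

```go
// Illustrative routing of a single volume once volume policies may be present.
func shouldUseFSBackup(policies *resourcepolicies.Policies, resource interface{}) (bool, error) {
    if policies == nil {
        // No volume policy on the backup request: the legacy opt-in/opt-out
        // pod-annotation approach applies instead.
        return false, nil
    }
    action, err := policies.GetMatchAction(resource) // resource: a PV, or the pod volume itself
    if err != nil {
        return false, err
    }
    // A matching fs-backup action selects the volume via the policy path.
    return action != nil && action.Type == resourcepolicies.FSBackup, nil
}
```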

- Modifications needed for `snapshot` action:
43 changes: 42 additions & 1 deletion internal/resourcepolicies/resource_policies.go
@@ -16,11 +16,16 @@ limitations under the License.
package resourcepolicies

import (
    "context"
    "fmt"
    "strings"

    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    v1 "k8s.io/api/core/v1"
    crclient "sigs.k8s.io/controller-runtime/pkg/client"

    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

type VolumeActionType string

@@ -148,7 +153,43 @@ func (p *Policies) Validate() error {
    return nil
}

func GetResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
func GetResourcePoliciesFromBackup(
    backup velerov1api.Backup,
    client crclient.Client,
    logger logrus.FieldLogger,
) (resourcePolicies *Policies, err error) {
    if backup.Spec.ResourcePolicy != nil &&
        strings.EqualFold(backup.Spec.ResourcePolicy.Kind, ConfigmapRefType) {
        policiesConfigMap := &v1.ConfigMap{}
        err = client.Get(
            context.Background(),
            crclient.ObjectKey{Namespace: backup.Namespace, Name: backup.Spec.ResourcePolicy.Name},
            policiesConfigMap,
        )
        if err != nil {
            logger.Errorf("Fail to get ResourcePolicies %s ConfigMap with error %s.",
                backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
            return nil, fmt.Errorf("fail to get ResourcePolicies %s ConfigMap with error %s",
                backup.Namespace+"/"+backup.Spec.ResourcePolicy.Name, err.Error())
        }
        resourcePolicies, err = getResourcePoliciesFromConfig(policiesConfigMap)
        if err != nil {
            logger.Errorf("Fail to read ResourcePolicies from ConfigMap %s with error %s.",
                backup.Namespace+"/"+backup.Name, err.Error())
            return nil, fmt.Errorf("fail to read the ResourcePolicies from ConfigMap %s with error %s",
                backup.Namespace+"/"+backup.Name, err.Error())
        } else if err = resourcePolicies.Validate(); err != nil {
            logger.Errorf("Fail to validate ResourcePolicies in ConfigMap %s with error %s.",
                backup.Namespace+"/"+backup.Name, err.Error())
            return nil, fmt.Errorf("fail to validate ResourcePolicies in ConfigMap %s with error %s",
                backup.Namespace+"/"+backup.Name, err.Error())
        }
    }

    return resourcePolicies, nil
}

func getResourcePoliciesFromConfig(cm *v1.ConfigMap) (*Policies, error) {
    if cm == nil {
        return nil, fmt.Errorf("could not parse config from nil configmap")
    }
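
A minimal caller-side sketch of the new helper, assuming a controller-runtime client (`kbClient`), a logger (`log`), and a resource such as a `*corev1.PersistentVolume` (`pv`) are already in scope; those names are hypothetical:

```go
// GetResourcePoliciesFromBackup returns (nil, nil) when the backup does not
// reference a ConfigMap-typed resource policy, so callers must nil-check.
policies, err := resourcepolicies.GetResourcePoliciesFromBackup(*backup, kbClient, log)
if err != nil {
    return err
}
if policies != nil {
    // Match the resource against the parsed, validated policies.
    action, err := policies.GetMatchAction(pv)
    if err != nil {
        return err
    }
    if action != nil && action.Type == resourcepolicies.Snapshot {
        // The snapshot path was selected for this volume.
    }
}
```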
2 changes: 1 addition & 1 deletion internal/resourcepolicies/resource_policies_test.go
@@ -231,7 +231,7 @@ func TestGetResourcePoliciesFromConfig(t *testing.T) {
    }

    // Call the function and check for errors
    resPolicies, err := GetResourcePoliciesFromConfig(cm)
    resPolicies, err := getResourcePoliciesFromConfig(cm)
    assert.Nil(t, err)

    // Check that the returned resourcePolicies object contains the expected data
26 changes: 15 additions & 11 deletions internal/volumehelper/volume_policy_helper.go
@@ -143,20 +143,24 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod corev1api.Pod) (bool, error) {
    }

    if v.volumePolicy != nil {
        pvc, err := kubeutil.GetPVCForPodVolume(&volume, &pod, v.client)
        if err != nil {
            v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
            return false, err
        }
        pv, err := kubeutil.GetPVForPVC(pvc, v.client)
        if err != nil {
            v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
            return false, err
        var resource interface{}
        resource = &volume
        if volume.VolumeSource.PersistentVolumeClaim != nil {
            pvc, err := kubeutil.GetPVCForPodVolume(&volume, &pod, v.client)
            if err != nil {
                v.logger.WithError(err).Errorf("fail to get PVC for pod %s", pod.Namespace+"/"+pod.Name)
                return false, err
            }
            resource, err = kubeutil.GetPVForPVC(pvc, v.client)
            if err != nil {
                v.logger.WithError(err).Errorf("fail to get PV for PVC %s", pvc.Namespace+"/"+pvc.Name)
                return false, err
            }
        }

        action, err := v.volumePolicy.GetMatchAction(pv)
        action, err := v.volumePolicy.GetMatchAction(resource)
        if err != nil {
            v.logger.WithError(err).Errorf("fail to get VolumePolicy match action for PV %s", pv.Name)
            v.logger.WithError(err).Error("fail to get VolumePolicy match action for volume")
            return false, err
        }
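
With this change, `GetMatchAction` accepts either resource shape; a short sketch of the two call sites, assuming `v.volumePolicy` is non-nil:

```go
// PVC-backed pod volume: match against the bound PV, as before.
action, err := v.volumePolicy.GetMatchAction(pv)

// Non-PVC volume (e.g. emptyDir): match against the pod volume itself,
// so conditions such as volumeTypes can still apply.
action, err = v.volumePolicy.GetMatchAction(&volume)
```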
26 changes: 26 additions & 0 deletions internal/volumehelper/volume_policy_helper_test.go
@@ -552,6 +552,32 @@ func TestVolumeHelperImpl_ShouldPerformFSBackup(t *testing.T) {
            shouldFSBackup: true,
            expectedErr:    false,
        },
        {
            name: "Volume source is emptyDir, VolumePolicy match, return true and no error",
            pod: builder.ForPod("ns", "pod-1").
                Volumes(
                    &corev1.Volume{
                        Name: "",
                        VolumeSource: corev1.VolumeSource{
                            EmptyDir: &corev1.EmptyDirVolumeSource{},
                        },
                    }).Result(),
            resourcePolicies: &resourcepolicies.ResourcePolicies{
                Version: "v1",
                VolumePolicies: []resourcepolicies.VolumePolicy{
                    {
                        Conditions: map[string]interface{}{
                            "volumeTypes": []string{"emptyDir"},
                        },
                        Action: resourcepolicies.Action{
                            Type: resourcepolicies.FSBackup,
                        },
                    },
                },
            },
            shouldFSBackup: true,
            expectedErr:    false,
        },
        {
            name: "VolumePolicy match, action type is not fs-backup, return false and no error",
            pod: builder.ForPod("ns", "pod-1").
17 changes: 17 additions & 0 deletions pkg/backup/actions/csi/pvc_action.go
@@ -40,6 +40,7 @@ import (
    "github.com/vmware-tanzu/velero/pkg/kuberesource"
    "github.com/vmware-tanzu/velero/pkg/label"
    plugincommon "github.com/vmware-tanzu/velero/pkg/plugin/framework/common"
    "github.com/vmware-tanzu/velero/pkg/plugin/utils/volumehelper"
    "github.com/vmware-tanzu/velero/pkg/plugin/velero"
    biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2"
    uploaderUtil "github.com/vmware-tanzu/velero/pkg/uploader/util"

@@ -229,6 +230,22 @@ func (p *pvcBackupItemAction) Execute(
        return item, nil, "", nil, nil
    }

    shouldSnapshot, err := volumehelper.ShouldPerformSnapshotWithBackup(
        item,
        kuberesource.PersistentVolumeClaims,
        *backup,
        p.crClient,
        p.log,
    )
    if err != nil {
        return nil, nil, "", nil, err
    }
    if !shouldSnapshot {
        p.log.Debugf("CSI plugin skip snapshot for PVC %s according to the VolumeHelper setting.",
            pvc.Namespace+"/"+pvc.Name)
        return nil, nil, "", nil, err
    }

    vs, err := p.createVolumeSnapshot(pvc, backup)
    if err != nil {
        return nil, nil, "", nil, err
16 changes: 16 additions & 0 deletions pkg/backup/actions/csi/pvc_action_test.go
@@ -61,6 +61,7 @@ func TestExecute(t *testing.T) {
        expectedBackup     *velerov1api.Backup
        expectedDataUpload *velerov2alpha1.DataUpload
        expectedPVC        *corev1.PersistentVolumeClaim
        resourcePolicy     *corev1.ConfigMap
    }{
        {
            name: "Skip PVC BIA when backup is in finalizing phase",

@@ -127,6 +128,16 @@
                builder.WithLabels(velerov1api.BackupNameLabel, "test", velerov1api.VolumeSnapshotLabel, "")).
                VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
        },
        {
            name:           "Test ResourcePolicy",
            backup:         builder.ForBackup("velero", "test").ResourcePolicies("resourcePolicy").SnapshotVolumes(false).Result(),
            resourcePolicy: builder.ForConfigMap("velero", "resourcePolicy").Data("policy", "{\"version\":\"v1\", \"volumePolicies\":[{\"conditions\":{\"csi\": {}},\"action\":{\"type\":\"snapshot\"}}]}").Result(),
            pvc:            builder.ForPersistentVolumeClaim("velero", "testPVC").VolumeName("testPV").StorageClass("testSC").Phase(corev1.ClaimBound).Result(),
            pv:             builder.ForPersistentVolume("testPV").CSI("hostpath", "testVolume").Result(),
            sc:             builder.ForStorageClass("testSC").Provisioner("hostpath").Result(),
            vsClass:        builder.ForVolumeSnapshotClass("testVSClass").Driver("hostpath").ObjectMeta(builder.WithLabels(velerov1api.VolumeSnapshotClassSelectorLabel, "")).Result(),
            expectedErr:    nil,
        },
    }

    for _, tc := range tests {

@@ -147,6 +158,9 @@
        if tc.vsClass != nil {
            require.NoError(t, crClient.Create(context.Background(), tc.vsClass))
        }
        if tc.resourcePolicy != nil {
            require.NoError(t, crClient.Create(context.Background(), tc.resourcePolicy))
        }

        pvcBIA := pvcBackupItemAction{
            log: logger,

@@ -190,6 +204,8 @@
        resultUnstructed, _, _, _, err := pvcBIA.Execute(&unstructured.Unstructured{Object: pvcMap}, tc.backup)
        if tc.expectedErr != nil {
            require.Equal(t, err, tc.expectedErr)
        } else {
            require.NoError(t, err)
        }

        if tc.expectedDataUpload != nil {
32 changes: 3 additions & 29 deletions pkg/backup/backup.go
@@ -95,6 +95,7 @@ type Backupper interface {
        outBackupFile io.Writer,
        backupItemActionResolver framework.BackupItemActionResolverV2,
        asyncBIAOperations []*itemoperation.BackupOperation,
        backupStore persistence.BackupStore,
    ) error
}

@@ -610,6 +611,7 @@ func (kb *kubernetesBackupper) FinalizeBackup(
    outBackupFile io.Writer,
    backupItemActionResolver framework.BackupItemActionResolverV2,
    asyncBIAOperations []*itemoperation.BackupOperation,
    backupStore persistence.BackupStore,
) error {
    gzw := gzip.NewWriter(outBackupFile)
    defer gzw.Close()

@@ -726,7 +728,7 @@
        }).Infof("Updated %d items out of an estimated total of %d (estimate will change throughout the backup finalizer)", len(backupRequest.BackedUpItems), totalItems)
    }

    backupStore, volumeInfos, err := kb.getVolumeInfos(*backupRequest.Backup, log)
    volumeInfos, err := backupStore.GetBackupVolumeInfos(backupRequest.Backup.Name)
    if err != nil {
        log.WithError(err).Errorf("fail to get the backup VolumeInfos for backup %s", backupRequest.Name)
        return err

@@ -810,34 +812,6 @@ type tarWriter interface {
    WriteHeader(*tar.Header) error
}

func (kb *kubernetesBackupper) getVolumeInfos(
    backup velerov1api.Backup,
    log logrus.FieldLogger,
) (persistence.BackupStore, []*volume.BackupVolumeInfo, error) {
    location := &velerov1api.BackupStorageLocation{}
    if err := kb.kbClient.Get(context.Background(), kbclient.ObjectKey{
        Namespace: backup.Namespace,
        Name:      backup.Spec.StorageLocation,
    }, location); err != nil {
        return nil, nil, errors.WithStack(err)
    }

    pluginManager := kb.pluginManager(log)
    defer pluginManager.CleanupClients()

    backupStore, storeErr := kb.backupStoreGetter.Get(location, pluginManager, log)
    if storeErr != nil {
        return nil, nil, storeErr
    }

    volumeInfos, err := backupStore.GetBackupVolumeInfos(backup.Name)
    if err != nil {
        return nil, nil, err
    }

    return backupStore, volumeInfos, nil
}

// updateVolumeInfos update the VolumeInfos according to the AsyncOperations
func updateVolumeInfos(
    volumeInfos []*volume.BackupVolumeInfo,
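
With `getVolumeInfos` removed, the caller now resolves the store once and passes it in; a minimal wiring sketch, assuming the caller already holds a plugin manager factory and a `BackupStoreGetter` (receiver `r`, `location`, and the leading `FinalizeBackup` arguments elided by this hunk are hypothetical names):

```go
// Reuse one plugin manager for the whole finalize pass instead of creating
// a second one inside the backupper just to fetch volume infos.
pluginManager := r.newPluginManager(log)
defer pluginManager.CleanupClients()

backupStore, err := r.backupStoreGetter.Get(location, pluginManager, log)
if err != nil {
    return err
}
if err := backupper.FinalizeBackup(log, backupRequest, inBackupFile, outBackupFile,
    backupItemActionResolver, asyncBIAOperations, backupStore); err != nil {
    return err
}
```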
19 changes: 0 additions & 19 deletions pkg/backup/backup_test.go
@@ -54,8 +54,6 @@ import (
    "github.com/vmware-tanzu/velero/pkg/kuberesource"
    "github.com/vmware-tanzu/velero/pkg/persistence"
    persistencemocks "github.com/vmware-tanzu/velero/pkg/persistence/mocks"
    "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
    pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks"
    "github.com/vmware-tanzu/velero/pkg/plugin/velero"
    biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2"
    vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1"

@@ -4519,23 +4517,6 @@ func TestBackupNamespaces(t *testing.T) {
    }
}

func TestGetVolumeInfos(t *testing.T) {
    h := newHarness(t)
    pluginManager := new(pluginmocks.Manager)
    backupStore := new(persistencemocks.BackupStore)
    h.backupper.pluginManager = func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }
    h.backupper.backupStoreGetter = NewFakeSingleObjectBackupStoreGetter(backupStore)
    backupStore.On("GetBackupVolumeInfos", "backup-01").Return([]*volume.BackupVolumeInfo{}, nil)
    pluginManager.On("CleanupClients").Return()

    backup := builder.ForBackup("velero", "backup-01").StorageLocation("default").Result()
    bsl := builder.ForBackupStorageLocation("velero", "default").Result()
    require.NoError(t, h.backupper.kbClient.Create(context.Background(), bsl))

    _, _, err := h.backupper.getVolumeInfos(*backup, h.log)
    require.NoError(t, err)
}

func TestUpdateVolumeInfos(t *testing.T) {
    timeExample := time.Date(2014, 6, 5, 11, 56, 45, 0, time.Local)
    now := metav1.NewTime(timeExample)
3 changes: 1 addition & 2 deletions pkg/backup/item_backupper.go
@@ -24,8 +24,6 @@ import (
    "strings"
    "time"

    "github.com/vmware-tanzu/velero/internal/volumehelper"

    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    corev1api "k8s.io/api/core/v1"

@@ -42,6 +40,7 @@ import (
    "github.com/vmware-tanzu/velero/internal/hook"
    "github.com/vmware-tanzu/velero/internal/resourcepolicies"
    "github.com/vmware-tanzu/velero/internal/volume"
    "github.com/vmware-tanzu/velero/internal/volumehelper"
    velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    "github.com/vmware-tanzu/velero/pkg/archive"
    "github.com/vmware-tanzu/velero/pkg/client"
17 changes: 17 additions & 0 deletions pkg/backup/item_collector.go
@@ -741,6 +741,23 @@ func (r *itemCollector) collectNamespaces(
        return nil, errors.WithStack(err)
    }

    for _, includedNSName := range r.backupRequest.Backup.Spec.IncludedNamespaces {
        nsExists := false
        // Skip the existence check when the entry is "*".
        if includedNSName == "*" {
            continue
        }
        for _, unstructuredNS := range unstructuredList.Items {
            if unstructuredNS.GetName() == includedNSName {
                nsExists = true
            }
        }

        if !nsExists {
            log.Errorf("fail to get the namespace %s specified in backup.Spec.IncludedNamespaces", includedNSName)
        }
    }

    var singleSelector labels.Selector
    var orSelectors []labels.Selector
11 changes: 11 additions & 0 deletions pkg/backup/item_collector_test.go
@@ -224,6 +224,17 @@ func TestItemCollectorBackupNamespaces(t *testing.T) {
            },
            expectedTrackedNS: []string{"ns1", "ns2"},
        },
        {
            name:   "ns specified by the IncludedNamespaces cannot be found",
            backup: builder.ForBackup("velero", "backup").IncludedNamespaces("ns1", "invalid", "*").Result(),
            ie:     collections.NewIncludesExcludes().Includes("ns1", "invalid", "*"),
            namespaces: []*corev1.Namespace{
                builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
                builder.ForNamespace("ns2").Result(),
                builder.ForNamespace("ns3").Result(),
            },
            expectedTrackedNS: []string{"ns1"},
        },
    }

    for _, tc := range tests {