diff --git a/api/v1alpha1/lvmcluster_types.go b/api/v1alpha1/lvmcluster_types.go
index b5e713c84..e466c6bce 100644
--- a/api/v1alpha1/lvmcluster_types.go
+++ b/api/v1alpha1/lvmcluster_types.go
@@ -115,6 +115,12 @@ type DeviceSelector struct {
 	// We discourage using the device names as they can change over node restarts.
 	// +optional
 	OptionalPaths []string `json:"optionalPaths,omitempty"`
+
+	// ForceWipeDevicesAndDestroyAllData runs wipefs to wipe the devices.
+	// This can lead to data loss. Please use this only when you know that the disk
+	// does not contain any important data.
+	// +optional
+	ForceWipeDevicesAndDestroyAllData bool `json:"forceWipeDevicesAndDestroyAllData,omitempty"`
 }
 
 // type DeviceClassConfig struct {
diff --git a/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml b/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml
index 3727f4c3a..d5a642537 100644
--- a/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml
+++ b/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml
@@ -56,6 +56,12 @@ spec:
                       description: DeviceSelector is a set of rules that should match
                         for a device to be included in the LVMCluster
                       properties:
+                        forceWipeDevicesAndDestroyAllData:
+                          description: ForceWipeDevicesAndDestroyAllData runs
+                            wipefs to wipe the devices. This can lead to data
+                            loss. Please use this only when you know that the
+                            disk does not contain any important data.
+                          type: boolean
                         optionalPaths:
                           description: A list of device paths which could be chosen
                             for creating Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1"
diff --git a/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml b/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml
index 2849b3cf4..ecfb59a2a 100644
--- a/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml
+++ b/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml
@@ -42,6 +42,12 @@ spec:
                description: DeviceSelector is a set of rules that should match
                  for a device to be included in this TopoLVMCluster
                properties:
+                 forceWipeDevicesAndDestroyAllData:
+                   description: ForceWipeDevicesAndDestroyAllData runs wipefs to
+                     wipe the devices. This can lead to data loss. Please use this
+                     only when you know that the disk does not contain any important
+                     data.
+                   type: boolean
                 optionalPaths:
                   description: A list of device paths which could be chosen for creating
                     Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1"
For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1" diff --git a/bundle/manifests/lvms-operator.clusterserviceversion.yaml b/bundle/manifests/lvms-operator.clusterserviceversion.yaml index aab4babaa..35f2e1b49 100644 --- a/bundle/manifests/lvms-operator.clusterserviceversion.yaml +++ b/bundle/manifests/lvms-operator.clusterserviceversion.yaml @@ -616,7 +616,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name - image: quay.io/lvms_dev/lvms-operator:latest + image: quay.io/sakbas/lvms-operator:11.09 livenessProbe: httpGet: path: /healthz @@ -661,7 +661,7 @@ spec: memory: 20Mi - command: - /metricsexporter - image: quay.io/lvms_dev/lvms-operator:latest + image: quay.io/sakbas/lvms-operator:11.09 name: metricsexporter resources: requests: diff --git a/cmd/vgmanager/main.go b/cmd/vgmanager/main.go index f0027808d..c060255e6 100644 --- a/cmd/vgmanager/main.go +++ b/cmd/vgmanager/main.go @@ -26,6 +26,7 @@ import ( "github.com/openshift/lvm-operator/pkg/lvm" "github.com/openshift/lvm-operator/pkg/lvmd" "github.com/openshift/lvm-operator/pkg/vgmanager" + "github.com/openshift/lvm-operator/pkg/wipefs" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -86,6 +87,7 @@ func main() { LVMD: lvmd.DefaultConfigurator(), Scheme: mgr.GetScheme(), LSBLK: lsblk.NewDefaultHostLSBLK(), + Wipefs: wipefs.NewDefaultHostWipefs(), LVM: lvm.NewDefaultHostLVM(), NodeName: os.Getenv("NODE_NAME"), Namespace: os.Getenv("POD_NAMESPACE"), diff --git a/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml b/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml index 9dd8a935f..8c0696fc5 100644 --- a/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml +++ b/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml @@ -52,6 +52,12 @@ spec: description: DeviceSelector is a set of rules that should match for a device to be included in the LVMCluster properties: + forceWipeDisksAndDestroyAllData: + description: ForceWipeDevicesAndDestroyAllData runs + wipefs to wipe the devices. This can lead to data + lose. Please use this only when you know that the + disk does not contain any important data. + type: boolean optionalPaths: description: A list of device paths which could be chosen for creating Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1" diff --git a/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml b/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml index 2cc3b6fd7..1f50512b2 100644 --- a/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml +++ b/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml @@ -42,6 +42,12 @@ spec: description: DeviceSelector is a set of rules that should match for a device to be included in this TopoLVMCluster properties: + forceWipeDisksAndDestroyAllData: + description: ForceWipeDevicesAndDestroyAllData runs wipefs to + wipe the devices. This can lead to data lose. Please use this + only when you know that the disk does not contain any important + data. + type: boolean optionalPaths: description: A list of device paths which could be chosen for creating Volume Group. 
For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1" diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 0b5e02e69..95b47cc32 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -7,6 +7,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: quay.io/lvms_dev/lvms-operator - newTag: latest + newName: quay.io/sakbas/lvms-operator + newTag: "11.09" namePrefix: lvms- diff --git a/olm-deploy/kustomization.yaml b/olm-deploy/kustomization.yaml index 2622d63d6..11f353fe6 100644 --- a/olm-deploy/kustomization.yaml +++ b/olm-deploy/kustomization.yaml @@ -14,8 +14,8 @@ configurations: # replace catalogsource image images: - name: catalog-img - newName: quay.io/lvms_dev/lvms-operator-catalog - newTag: latest + newName: quay.io/sakbas/lvms-operator-catalog + newTag: "11.09" patches: - patch: |- diff --git a/pkg/internal/exec/exec.go b/pkg/internal/exec/exec.go index 6b42826e6..cde2baa1d 100644 --- a/pkg/internal/exec/exec.go +++ b/pkg/internal/exec/exec.go @@ -26,7 +26,7 @@ var ( nsenterPath = "/usr/bin/nsenter" ) -// Executor is the interface for running exec commands +// Executor is the interface for running exec commands type Executor interface { ExecuteCommandWithOutput(command string, arg ...string) (string, error) ExecuteCommandWithOutputAsHost(command string, arg ...string) (string, error) diff --git a/pkg/vgmanager/devices.go b/pkg/vgmanager/devices.go index 9895a4432..5c9797364 100644 --- a/pkg/vgmanager/devices.go +++ b/pkg/vgmanager/devices.go @@ -128,23 +128,21 @@ func (r *VGReconciler) filterMatchingDevices(ctx context.Context, blockDevices [ } // If Paths is specified, treat it as required paths - if len(volumeGroup.Spec.DeviceSelector.Paths) > 0 { - for _, path := range volumeGroup.Spec.DeviceSelector.Paths { - blockDevice, err := getValidDevice(path, blockDevices, vgs, volumeGroup) - if err != nil { - // An error for required devices is critical - return nil, fmt.Errorf("unable to validate device %s: %v", path, err) - } - - // Check if we should skip this device - if blockDevice.DevicePath == "" { - logger.Info(fmt.Sprintf("skipping required device that is already part of volume group %s: %s", volumeGroup.Name, path)) - devicesAlreadyInVG = true - continue - } + for _, path := range volumeGroup.Spec.DeviceSelector.Paths { + blockDevice, err := getValidDevice(path, blockDevices, vgs, volumeGroup) + if err != nil { + // An error for required devices is critical + return nil, fmt.Errorf("unable to validate device %s: %v", path, err) + } - filteredBlockDevices = append(filteredBlockDevices, blockDevice) + // Check if we should skip this device + if blockDevice.DevicePath == "" { + logger.Info(fmt.Sprintf("skipping required device that is already part of volume group %s: %s", volumeGroup.Name, path)) + devicesAlreadyInVG = true + continue } + + filteredBlockDevices = append(filteredBlockDevices, blockDevice) } // Check for any optional paths diff --git a/pkg/vgmanager/vgmanager_controller.go b/pkg/vgmanager/vgmanager_controller.go index 39dfdeabd..aca139f33 100644 --- a/pkg/vgmanager/vgmanager_controller.go +++ b/pkg/vgmanager/vgmanager_controller.go @@ -19,6 +19,7 @@ package vgmanager import ( "context" "fmt" + "github.com/openshift/lvm-operator/pkg/wipefs" "strconv" "time" @@ -30,7 +31,6 @@ import ( "github.com/openshift/lvm-operator/pkg/lsblk" "github.com/openshift/lvm-operator/pkg/lvm" "github.com/openshift/lvm-operator/pkg/lvmd" 
- corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -88,6 +88,7 @@ type VGReconciler struct { LVMD lvmd.Configurator lvm.LVM lsblk.LSBLK + wipefs.Wipefs NodeName string Namespace string Filters func(lvm.LVM, lsblk.LSBLK) filter.Filters @@ -174,13 +175,46 @@ func (r *VGReconciler) reconcile(ctx context.Context, volumeGroup *lvmv1alpha1.L return ctrl.Result{}, fmt.Errorf("failed to list block devices: %w", err) } + wiped := false + if volumeGroup.Spec.DeviceSelector.ForceWipeDevicesAndDestroyAllData { + allPaths := append(volumeGroup.Spec.DeviceSelector.Paths, volumeGroup.Spec.DeviceSelector.OptionalPaths...) + for _, path := range allPaths { + if isDeviceAlreadyPartOfVG(vgs, path, volumeGroup) { + continue + } + for _, device := range blockDevices { + if device.KName == path { + if err := r.Wipefs.Wipe(device.KName); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to wipe the device %s: %w", device.KName, err) + } else { + wiped = true + logger.Info("device wiped successfully", "deviceName", device.KName) + } + for _, child := range device.Children { + if err := r.Wipefs.RemoveMapperReference(child.KName); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to remove device-mapper reference %s for device %s: %w", child.KName, device.KName, err) + } else { + logger.Info("device-mapper reference removed successfully", "deviceName", device.KName, "childName", child.KName) + } + } + } + } + } + } + if wiped { + blockDevices, err = r.ListBlockDevices() + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list block devices: %w", err) + } + } + // Get the available block devices that can be used for this volume group availableDevices, err := r.getAvailableDevicesForVG(ctx, blockDevices, vgs, volumeGroup) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to get block devices for volumegroup %s: %w", volumeGroup.GetName(), err) + return ctrl.Result{}, fmt.Errorf("failed to get available block devices for volumegroup %s: %w", volumeGroup.GetName(), err) } - logger.Info("listing available and delayed devices", "availableDevices", availableDevices) + logger.Info("listing available devices", "availableDevices", availableDevices) // If there are no available devices, that could mean either // - There is no available devices to attach to the volume group diff --git a/pkg/wipefs/wipefs.go b/pkg/wipefs/wipefs.go new file mode 100644 index 000000000..c1d73db50 --- /dev/null +++ b/pkg/wipefs/wipefs.go @@ -0,0 +1,67 @@ +package wipefs + +import ( + "fmt" + + "github.com/openshift/lvm-operator/pkg/internal/exec" +) + +var ( + DefaultWipefs = "/usr/bin/wipefs" + DefaultDMSetup = "/usr/sbin/dmsetup" +) + +type Wipefs interface { + Wipe(deviceName string) error + RemoveMapperReference(deviceName string) error +} + +type HostWipefs struct { + exec.Executor + wipefs string + dmsetup string +} + +func NewDefaultHostWipefs() *HostWipefs { + return NewHostWipefs(&exec.CommandExecutor{}, DefaultWipefs, DefaultDMSetup) +} + +func NewHostWipefs(executor exec.Executor, wipefs, dmsetup string) *HostWipefs { + return &HostWipefs{ + Executor: executor, + wipefs: wipefs, + dmsetup: dmsetup, + } +} + +// Wipe wipes the device only if force delete flag is set +func (wipefs *HostWipefs) Wipe(deviceName string) error { + if len(deviceName) == 0 { + return fmt.Errorf("failed to wipe the device. 
+
+	args := []string{"--all", "--force"}
+	args = append(args, deviceName)
+	_, err := wipefs.ExecuteCommandWithOutputAsHost(wipefs.wipefs, args...)
+	if err != nil {
+		return fmt.Errorf("failed to wipe the device %q. %v", deviceName, err)
+	}
+
+	return nil
+}
+
+// RemoveMapperReference removes the device's reference from the device-mapper
+func (wipefs *HostWipefs) RemoveMapperReference(deviceName string) error {
+	if len(deviceName) == 0 {
+		return fmt.Errorf("failed to remove device-mapper reference. Device name is empty")
+	}
+
+	args := []string{"remove"}
+	args = append(args, deviceName)
+	_, err := wipefs.ExecuteCommandWithOutputAsHost(wipefs.dmsetup, args...)
+	if err != nil {
+		return fmt.Errorf("failed to remove the reference from device-mapper %q. %v", deviceName, err)
+	}
+
+	return nil
+}
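
Example usage: with this change an administrator can opt in to wiping pre-existing signatures by setting the new flag on a device selector. A minimal LVMCluster manifest might look like the sketch below; the metadata, device-class name, and device paths are placeholders, and the surrounding spec.storage.deviceClasses layout is assumed from the existing API rather than introduced by this patch.

    apiVersion: lvm.topolvm.io/v1alpha1
    kind: LVMCluster
    metadata:
      name: my-lvmcluster
    spec:
      storage:
        deviceClasses:
          - name: vg1
            deviceSelector:
              paths:
                - /dev/disk/by-path/pci-0000:04:00.0-nvme-1
              optionalPaths:
                - /dev/disk/by-path/pci-0000:05:00.0-nvme-1
              # Destroys any existing data on the selected disks; only set this
              # when the disks are known to hold nothing important.
              forceWipeDevicesAndDestroyAllData: true

Because the wipe is implemented as wipefs --all --force followed by dmsetup remove on any child mappings, it is skipped for devices that are already part of the volume group, and the block-device list is re-read afterwards so freshly cleared disks can be picked up in the same reconcile.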