Skip to content

Commit

Permalink
feat: wipe devices before using if enabled
Browse files Browse the repository at this point in the history
Signed-off-by: Suleyman Akbas <[email protected]>
  • Loading branch information
suleymanakbas91 committed Sep 28, 2023
1 parent bd11b2a commit 7d3abc8
Show file tree
Hide file tree
Showing 17 changed files with 554 additions and 25 deletions.
6 changes: 6 additions & 0 deletions .mockery.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,9 @@ packages:
github.com/openshift/lvm-operator/pkg/lvm:
interfaces:
LVM:
github.com/openshift/lvm-operator/pkg/wipefs:
interfaces:
Wipefs:
github.com/openshift/lvm-operator/pkg/dmsetup:
interfaces:
Dmsetup:
17 changes: 6 additions & 11 deletions api/v1alpha1/lvmcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// LVMClusterSpec defines the desired state of LVMCluster
type LVMClusterSpec struct {
// Important: Run "make" to regenerate code after modifying this file
Expand Down Expand Up @@ -114,15 +111,13 @@ type DeviceSelector struct {
// We discourage using the device names as they can change over node restarts.
// +optional
OptionalPaths []string `json:"optionalPaths,omitempty"`
}

// type DeviceClassConfig struct {
// LVMConfig *LVMConfig `json:"lvmConfig,omitempty"`
// }

// type LVMConfig struct {
// thinProvision bool `json:"thinProvision,omitempty"`
// }
// ForceWipeDevicesAndDestroyAllData runs wipefs to wipe the devices.
// This can lead to data loss. Enable this only when you know that the disk
// does not contain any important data.
// +optional
ForceWipeDevicesAndDestroyAllData bool `json:"forceWipeDevicesAndDestroyAllData,omitempty"`
}

type LVMStateType string

Expand Down
6 changes: 6 additions & 0 deletions bundle/manifests/lvm.topolvm.io_lvmclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,12 @@ spec:
description: DeviceSelector is a set of rules that should
match for a device to be included in the LVMCluster
properties:
forceWipeDevicesAndDestroyAllData:
description: ForceWipeDevicesAndDestroyAllData runs
wipefs to wipe the devices. This can lead to data
loss. Enable this only when you know that the disk
does not contain any important data.
type: boolean
optionalPaths:
description: A list of device paths which could be chosen
for creating Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1"
Expand Down
5 changes: 5 additions & 0 deletions bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,11 @@ spec:
description: DeviceSelector is a set of rules that should match for
a device to be included in this TopoLVMCluster
properties:
forceWipeDevicesAndDestroyAllData:
description: ForceWipeDevicesAndDestroyAllData runs wipefs to
wipe the devices. This can lead to data loss. Enable this only
when you know that the disk does not contain any important data.
type: boolean
optionalPaths:
description: A list of device paths which could be chosen for
creating Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1"
Expand Down
4 changes: 4 additions & 0 deletions cmd/vgmanager/vgmanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,13 @@ import (
"os"

"github.com/go-logr/logr"
"github.com/openshift/lvm-operator/pkg/dmsetup"
"github.com/openshift/lvm-operator/pkg/filter"
"github.com/openshift/lvm-operator/pkg/lsblk"
"github.com/openshift/lvm-operator/pkg/lvm"
"github.com/openshift/lvm-operator/pkg/lvmd"
"github.com/openshift/lvm-operator/pkg/vgmanager"
"github.com/openshift/lvm-operator/pkg/wipefs"
"github.com/spf13/cobra"
"sigs.k8s.io/controller-runtime/pkg/cache"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
Expand Down Expand Up @@ -106,6 +108,8 @@ func run(_ *cobra.Command, _ []string, opts *Options) error {
LVMD: lvmd.DefaultConfigurator(),
Scheme: mgr.GetScheme(),
LSBLK: lsblk.NewDefaultHostLSBLK(),
Wipefs: wipefs.NewDefaultHostWipefs(),
Dmsetup: dmsetup.NewDefaultHostDmsetup(),
LVM: lvm.NewDefaultHostLVM(),
NodeName: os.Getenv("NODE_NAME"),
Namespace: os.Getenv("POD_NAMESPACE"),
Expand Down
6 changes: 6 additions & 0 deletions config/crd/bases/lvm.topolvm.io_lvmclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,12 @@ spec:
description: DeviceSelector is a set of rules that should
match for a device to be included in the LVMCluster
properties:
forceWipeDevicesAndDestroyAllData:
description: ForceWipeDevicesAndDestroyAllData runs
wipefs to wipe the devices. This can lead to data
loss. Enable this only when you know that the disk
does not contain any important data.
type: boolean
optionalPaths:
description: A list of device paths which could be chosen
for creating Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1"
Expand Down
5 changes: 5 additions & 0 deletions config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,11 @@ spec:
description: DeviceSelector is a set of rules that should match for
a device to be included in this TopoLVMCluster
properties:
forceWipeDevicesAndDestroyAllData:
description: ForceWipeDevicesAndDestroyAllData runs wipefs to
wipe the devices. This can lead to data loss. Enable this only
when you know that the disk does not contain any important data.
type: boolean
optionalPaths:
description: A list of device paths which could be chosen for
creating Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1"
Expand Down
52 changes: 52 additions & 0 deletions pkg/dmsetup/dmsetup.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
package dmsetup

import (
"fmt"
"strings"

"github.com/openshift/lvm-operator/pkg/internal/exec"
)

var (
	// DefaultDMSetup is the default host path of the dmsetup binary.
	DefaultDMSetup = "/usr/sbin/dmsetup"
	// ErrReferenceNotFound is returned by Remove when dmsetup reports
	// ("not found") that the device has no device-mapper entry.
	ErrReferenceNotFound = fmt.Errorf("device-mapper reference not found")
)

// Dmsetup abstracts the host dmsetup binary, which manages
// device-mapper mappings.
type Dmsetup interface {
	// Remove removes the named device's reference from the device-mapper.
	Remove(deviceName string) error
}

// HostDmsetup implements Dmsetup by shelling out to a dmsetup binary
// on the host through the embedded Executor.
type HostDmsetup struct {
	exec.Executor
	// dmsetup is the filesystem path of the dmsetup binary to invoke.
	dmsetup string
}

// NewDefaultHostDmsetup returns a HostDmsetup that runs the dmsetup
// binary at its default path using the host command executor.
func NewDefaultHostDmsetup() *HostDmsetup {
	executor := &exec.CommandExecutor{}
	return NewHostDmsetup(executor, DefaultDMSetup)
}

// NewHostDmsetup returns a HostDmsetup that invokes the given dmsetup
// binary through the supplied executor.
func NewHostDmsetup(executor exec.Executor, dmsetup string) *HostDmsetup {
	host := HostDmsetup{}
	host.Executor = executor
	host.dmsetup = dmsetup
	return &host
}

// Remove removes the device's reference from the device-mapper.
// It returns ErrReferenceNotFound when dmsetup reports that the named
// device has no device-mapper entry, so callers can treat that case as
// benign; any other failure is wrapped and returned.
func (d *HostDmsetup) Remove(deviceName string) error {
	if len(deviceName) == 0 {
		return fmt.Errorf("failed to remove device-mapper reference. Device name is empty")
	}

	// "dmsetup remove <name>" drops the mapping for the named device.
	args := []string{"remove", deviceName}
	output, err := d.ExecuteCommandWithOutputAsHost(d.dmsetup, args...)
	if err != nil {
		// dmsetup prints "... not found" when no mapping exists for the
		// name; surface that as the sentinel rather than a hard error.
		if strings.Contains(output, "not found") {
			return ErrReferenceNotFound
		}
		// Wrap with %w so callers can inspect the cause via errors.Is/As.
		return fmt.Errorf("failed to remove the reference from device-mapper %q: %w", deviceName, err)
	}

	return nil
}
74 changes: 74 additions & 0 deletions pkg/dmsetup/mocks/mock_dmsetup.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion pkg/internal/exec/exec.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ var (
nsenterPath = "/usr/bin/nsenter"
)

// Executor is the interface for running exec commands
// Executor is the interface for running exec commands
type Executor interface {
ExecuteCommandWithOutput(command string, arg ...string) (string, error)
ExecuteCommandWithOutputAsHost(command string, arg ...string) (string, error)
Expand Down
20 changes: 10 additions & 10 deletions pkg/vgmanager/devices.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ func (r *VGReconciler) filterDevices(ctx context.Context, devices []lsblk.BlockD
}

// getNewDevicesToBeAdded gets all devices that should be added to the volume group
func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices []lsblk.BlockDevice, vgs []lvm.VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]lsblk.BlockDevice, error) {
func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices []lsblk.BlockDevice, nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]lsblk.BlockDevice, error) {
logger := log.FromContext(ctx)

var validBlockDevices []lsblk.BlockDevice
Expand All @@ -151,7 +151,7 @@ func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices

// If Paths is specified, treat it as required paths
for _, path := range volumeGroup.Spec.DeviceSelector.Paths {
blockDevice, err := getValidDevice(path, blockDevices, vgs, volumeGroup)
blockDevice, err := getValidDevice(path, blockDevices, nodeStatus, volumeGroup)
if err != nil {
// An error for required devices is critical
return nil, fmt.Errorf("unable to validate device %s: %v", path, err)
Expand All @@ -168,7 +168,7 @@ func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices
}

for _, path := range volumeGroup.Spec.DeviceSelector.OptionalPaths {
blockDevice, err := getValidDevice(path, blockDevices, vgs, volumeGroup)
blockDevice, err := getValidDevice(path, blockDevices, nodeStatus, volumeGroup)

// Check if we should skip this device
if err != nil {
Expand Down Expand Up @@ -200,11 +200,11 @@ func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices
return validBlockDevices, nil
}

func isDeviceAlreadyPartOfVG(vgs []lvm.VolumeGroup, diskName string, volumeGroup *lvmv1alpha1.LVMVolumeGroup) bool {
for _, vg := range vgs {
if vg.Name == volumeGroup.Name {
for _, pv := range vg.PVs {
if pv.PvName == diskName {
func isDeviceAlreadyPartOfVG(nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, diskName string, volumeGroup *lvmv1alpha1.LVMVolumeGroup) bool {
for _, vgStatus := range nodeStatus.Spec.LVMVGStatus {
if vgStatus.Name == volumeGroup.Name {
for _, pv := range vgStatus.Devices {
if pv == diskName {
return true
}
}
Expand Down Expand Up @@ -232,15 +232,15 @@ func hasExactDisk(blockDevices []lsblk.BlockDevice, deviceName string) (lsblk.Bl
//
// An error will be returned if the device is invalid
// No error and an empty BlockDevice object will be returned if this device should be skipped (ex: duplicate device)
func getValidDevice(devicePath string, blockDevices []lsblk.BlockDevice, vgs []lvm.VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) (lsblk.BlockDevice, error) {
func getValidDevice(devicePath string, blockDevices []lsblk.BlockDevice, nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, volumeGroup *lvmv1alpha1.LVMVolumeGroup) (lsblk.BlockDevice, error) {
// Make sure the symlink exists
diskName, err := filepath.EvalSymlinks(devicePath)
if err != nil {
return lsblk.BlockDevice{}, fmt.Errorf("unable to find symlink for disk path %s: %v", devicePath, err)
}

// Make sure this isn't a duplicate in the VG
if isDeviceAlreadyPartOfVG(vgs, diskName, volumeGroup) {
if isDeviceAlreadyPartOfVG(nodeStatus, diskName, volumeGroup) {
return lsblk.BlockDevice{}, nil // No error, we just don't want a duplicate
}

Expand Down
2 changes: 1 addition & 1 deletion pkg/vgmanager/devices_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -570,7 +570,7 @@ func Test_getNewDevicesToBeAdded(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
ctx := log.IntoContext(context.Background(), testr.New(t))
availableDevices, err := r.getNewDevicesToBeAdded(ctx, tc.existingBlockDevices, tc.existingVGs, &tc.volumeGroup)
availableDevices, err := r.getNewDevicesToBeAdded(ctx, tc.existingBlockDevices, &v1alpha1.LVMVolumeGroupNodeStatus{}, &tc.volumeGroup)
if !tc.expectError {
assert.NoError(t, err)
} else {
Expand Down
21 changes: 19 additions & 2 deletions pkg/vgmanager/vgmanager_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,13 @@ import (
"github.com/google/go-cmp/cmp"
lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/controllers"
"github.com/openshift/lvm-operator/pkg/dmsetup"
"github.com/openshift/lvm-operator/pkg/filter"
"github.com/openshift/lvm-operator/pkg/lsblk"
"github.com/openshift/lvm-operator/pkg/lvm"
"github.com/openshift/lvm-operator/pkg/lvmd"
"github.com/openshift/lvm-operator/pkg/wipefs"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -87,6 +90,8 @@ type VGReconciler struct {
LVMD lvmd.Configurator
lvm.LVM
lsblk.LSBLK
wipefs.Wipefs
dmsetup.Dmsetup
NodeName string
Namespace string
Filters func(*lvmv1alpha1.LVMVolumeGroup, lvm.LVM, lsblk.LSBLK) filter.Filters
Expand Down Expand Up @@ -128,12 +133,13 @@ func (r *VGReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re
}
}

return r.reconcile(ctx, volumeGroup)
return r.reconcile(ctx, volumeGroup, nodeStatus)
}

func (r *VGReconciler) reconcile(
ctx context.Context,
volumeGroup *lvmv1alpha1.LVMVolumeGroup,
nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus,
) (ctrl.Result, error) {
logger := log.FromContext(ctx)
// Check if the LVMVolumeGroup resource is deleted
Expand Down Expand Up @@ -184,10 +190,21 @@ func (r *VGReconciler) reconcile(
return ctrl.Result{}, fmt.Errorf("failed to list block devices: %w", err)
}

wiped, err := r.wipeDevicesIfNecessary(ctx, volumeGroup, nodeStatus, blockDevices)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to wipe devices: %w", err)
}
if wiped {
blockDevices, err = r.LSBLK.ListBlockDevices()
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to list block devices: %w", err)
}
}

// Get the available block devices that can be used for this volume group
// valid means that it can be used to create or extend the volume group
// devices that are already part of the volume group will not be returned
newDevices, err := r.getNewDevicesToBeAdded(ctx, blockDevices, vgs, volumeGroup)
newDevices, err := r.getNewDevicesToBeAdded(ctx, blockDevices, nodeStatus, volumeGroup)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get matching available block devices for volumegroup %s: %w", volumeGroup.GetName(), err)
}
Expand Down
Loading

0 comments on commit 7d3abc8

Please sign in to comment.