Skip to content

Commit

Permalink
fix: remove dangling error checks
Browse files Browse the repository at this point in the history
Signed-off-by: Suleyman Akbas <[email protected]>
  • Loading branch information
suleymanakbas91 committed May 26, 2023
1 parent 9376f90 commit a7b0ce6
Show file tree
Hide file tree
Showing 7 changed files with 955 additions and 398 deletions.
2 changes: 2 additions & 0 deletions api/v1alpha1/lvmvolumegroupnodestatus_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@ type LVMVolumeGroupNodeStatusSpec struct {
type VGStatusType string

const (
// VGStatusProgressing means that the VG creation is still in progress
VGStatusProgressing VGStatusType = "Progressing"
// VGStatusReady means that the vg has been created and is Ready
VGStatusReady VGStatusType = "Ready"
// VGStatusFailed means that the VG could not be created
Expand Down
32 changes: 15 additions & 17 deletions cmd/vgmanager/main.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
Copyright 2021 Red Hat Openshift Data Foundation.
Copyright © 2023 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand All @@ -17,50 +17,48 @@ limitations under the License.
package main

import (

// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.

"flag"
"os"

_ "k8s.io/client-go/plugin/pkg/client/auth"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/pkg/vgmanager"

"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
scheme = runtime.NewScheme()
)

func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(lvmv1alpha1.AddToScheme(scheme))
}

var metricsAddr string
var probeAddr string
var developmentMode bool

func main() {
var metricsAddr string
var probeAddr string
var developmentMode bool
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&developmentMode, "development", false, "enable to enable development")
flag.BoolVar(&developmentMode, "development", false, "The switch to enable development mode.")
flag.Parse()

opts := zap.Options{}
opts.BindFlags(flag.CommandLine)
flag.Parse()
opts.Development = developmentMode

ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

setupLog := ctrl.Log.WithName("setup")

mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Expand All @@ -80,7 +78,7 @@ func main() {
NodeName: os.Getenv("NODE_NAME"),
Namespace: os.Getenv("POD_NAMESPACE"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "LVMCluster")
setupLog.Error(err, "unable to create controller", "controller", "VGManager")
os.Exit(1)
}

Expand Down
8 changes: 3 additions & 5 deletions controllers/lvmcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -234,15 +234,13 @@ func (r *LVMClusterReconciler) updateLVMClusterStatus(ctx context.Context, insta
return err
}

var statusVgCount int
var readyVGCount int
var isReady, isDegraded, isFailed bool

for _, nodeItem := range vgNodeStatusList.Items {
for _, item := range nodeItem.Spec.LVMVGStatus {

statusVgCount++

if item.Status == lvmv1alpha1.VGStatusReady {
readyVGCount++
isReady = true
} else if item.Status == lvmv1alpha1.VGStatusDegraded {
isDegraded = true
Expand All @@ -268,7 +266,7 @@ func (r *LVMClusterReconciler) updateLVMClusterStatus(ctx context.Context, insta
instance.Status.State = lvmv1alpha1.LVMStatusFailed
} else if isDegraded {
instance.Status.State = lvmv1alpha1.LVMStatusDegraded
} else if isReady && expectedVgCount == statusVgCount {
} else if isReady && expectedVgCount == readyVGCount {
instance.Status.State = lvmv1alpha1.LVMStatusReady
instance.Status.Ready = true
}
Expand Down
184 changes: 184 additions & 0 deletions pkg/vgmanager/devices.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,184 @@
/*
Copyright © 2023 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vgmanager

import (
"fmt"
"path/filepath"
"strings"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/pkg/internal"
)

// addDevicesToVG adds the given block devices to the volume group vgName.
// If vgName is not present in vgs the volume group is created with vgcreate;
// otherwise it is extended with vgextend. Returns an error when devices is
// empty or the underlying LVM command fails.
func (r *VGReconciler) addDevicesToVG(vgs []VolumeGroup, vgName string, devices []internal.BlockDevice) error {
	if len(devices) < 1 {
		return fmt.Errorf("can't create vg %q with 0 devices", vgName)
	}

	// check if volume group is already present
	vgFound := false
	for _, vg := range vgs {
		if vg.Name == vgName {
			vgFound = true
			break // no need to scan the remaining groups once found
		}
	}

	// build the command arguments: the vg name followed by one entry per
	// device, preferring the user-specified DevicePath over the kernel name
	args := []string{vgName}
	for _, device := range devices {
		if device.DevicePath != "" {
			args = append(args, device.DevicePath)
		} else {
			args = append(args, device.KName)
		}
	}

	// TODO: Check if we can use functions from lvm.go here
	var cmd string
	if vgFound {
		r.Log.Info("extending an existing volume group", "VGName", vgName)
		cmd = "/usr/sbin/vgextend"
	} else {
		r.Log.Info("creating a new volume group", "VGName", vgName)
		cmd = "/usr/sbin/vgcreate"
	}

	_, err := r.executor.ExecuteCommandWithOutputAsHost(cmd, args...)
	if err != nil {
		// wrap with %w (was %v) so callers can unwrap with errors.Is/As
		return fmt.Errorf("failed to create or extend volume group %q using command '%s': %w", vgName, fmt.Sprintf("%s %s", cmd, strings.Join(args, " ")), err)
	}

	return nil
}

// getMatchingDevicesForVG returns the block devices that match the volume
// group's device selector and pass the availability filters, plus the devices
// that matched but were first observed too recently to use yet (delayed).
func (r *VGReconciler) getMatchingDevicesForVG(blockDevices []internal.BlockDevice, vgs []VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]internal.BlockDevice, []internal.BlockDevice, error) {
	// narrow down to the devices selected via DeviceSelector.Paths, if any
	matching, err := r.filterMatchingDevices(blockDevices, vgs, volumeGroup)
	if err != nil {
		r.Log.Error(err, "failed to filter matching devices", "VGName", volumeGroup.Name)
		return nil, nil, err
	}

	// split into immediately usable devices and devices that must age first
	available, delayed := r.filterAvailableDevices(matching)
	return available, delayed, nil
}

// filterAvailableDevices returns:
// availableDevices: the list of blockdevices considered available
// delayedDevices: the list of blockdevices considered available, but first observed less than 'minDeviceAge' time ago
func (r *VGReconciler) filterAvailableDevices(blockDevices []internal.BlockDevice) ([]internal.BlockDevice, []internal.BlockDevice) {
	var availableDevices, delayedDevices []internal.BlockDevice
	// using a label so `continue DeviceLoop` can be used to skip devices
DeviceLoop:
	for _, blockDevice := range blockDevices {

		// record when this device was first seen so its age can be checked below
		r.deviceAgeMap.storeDeviceAge(blockDevice.KName)

		// check for partitions recursively
		if blockDevice.HasChildren() {
			childAvailable, childDelayed := r.filterAvailableDevices(blockDevice.Children)
			availableDevices = append(availableDevices, childAvailable...)
			delayedDevices = append(delayedDevices, childDelayed...)
		}

		devLogger := r.Log.WithValues("Device.Name", blockDevice.Name)
		for name, filter := range FilterMap {
			filterLogger := devLogger.WithValues("filter.Name", name)
			valid, err := filter(blockDevice, r.executor)
			if err != nil {
				// a filter error disqualifies the device outright
				// (previously also assigned valid=false here, which was
				// dead code before the continue — staticcheck SA4006)
				filterLogger.Error(err, "filter error")
				continue DeviceLoop
			}
			if !valid {
				filterLogger.Info("does not match filter")
				continue DeviceLoop
			}
		}
		// devices younger than minDeviceAge are reported as delayed so a
		// later reconcile can pick them up once they have settled
		if r.deviceAgeMap.isOlderThan(blockDevice.KName) {
			availableDevices = append(availableDevices, blockDevice)
		} else {
			delayedDevices = append(delayedDevices, blockDevice)
		}
	}
	return availableDevices, delayedDevices
}

// filterMatchingDevices returns matched blockdevices
func (r *VGReconciler) filterMatchingDevices(blockDevices []internal.BlockDevice, vgs []VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]internal.BlockDevice, error) {
	// no selector paths means every discovered block device is a candidate
	if volumeGroup.Spec.DeviceSelector == nil || len(volumeGroup.Spec.DeviceSelector.Paths) == 0 {
		return blockDevices, nil
	}

	var filtered []internal.BlockDevice
	for _, path := range volumeGroup.Spec.DeviceSelector.Paths {
		// resolve the user-supplied path to the underlying disk name
		diskName, err := filepath.EvalSymlinks(path)
		if err != nil {
			return []internal.BlockDevice{}, fmt.Errorf("unable to find symlink for disk path %s: %v", path, err)
		}

		// disks already registered in this volume group need no further work
		if isDeviceAlreadyPartOfVG(vgs, diskName, volumeGroup) {
			continue
		}

		device, found := hasExactDisk(blockDevices, diskName)
		if !found {
			return []internal.BlockDevice{}, fmt.Errorf("can not find device name %s in the available block devices", path)
		}

		// remember the original selector path for later command construction
		device.DevicePath = path
		filtered = append(filtered, device)
	}

	return filtered, nil
}

// isDeviceAlreadyPartOfVG reports whether diskName is already registered as a
// physical volume of the volume group whose name matches volumeGroup.
func isDeviceAlreadyPartOfVG(vgs []VolumeGroup, diskName string, volumeGroup *lvmv1alpha1.LVMVolumeGroup) bool {
	for _, vg := range vgs {
		if vg.Name != volumeGroup.Name {
			continue
		}
		for _, pv := range vg.PVs {
			if pv == diskName {
				return true
			}
		}
	}
	return false
}

// hasExactDisk searches blockDevices, descending into partitions recursively,
// for a device whose kernel name equals deviceName. The boolean result reports
// whether a match was found.
func hasExactDisk(blockDevices []internal.BlockDevice, deviceName string) (internal.BlockDevice, bool) {
	for i := range blockDevices {
		candidate := blockDevices[i]
		if candidate.KName == deviceName {
			return candidate, true
		}
		if !candidate.HasChildren() {
			continue
		}
		if match, ok := hasExactDisk(candidate.Children, deviceName); ok {
			return match, true
		}
	}
	return internal.BlockDevice{}, false
}
Loading

0 comments on commit a7b0ce6

Please sign in to comment.