From 1a54002e001bce86c319ec54d354dba5b28d6da7 Mon Sep 17 00:00:00 2001 From: CraneShiEMC <64512450+CraneShiEMC@users.noreply.github.com> Date: Tue, 27 Jun 2023 14:34:11 +0800 Subject: [PATCH] [ISSUE-1018] Custom Storage Group Feature (#997) * add StorageGroup property in proto message Signed-off-by: Shi, Crane * generate types.pb.go from types.proto Signed-off-by: Shi, Crane * make non-lvg volume creation consistent with storage group label Signed-off-by: Shi, Crane * set storageGroup to volume CR Signed-off-by: Shi, Crane * support syncing storage group label to LVG CR if necessary Signed-off-by: Shi, Crane * fix Signed-off-by: Shi, Crane * fix UT Signed-off-by: Shi, Crane * remove setting storageGroup property in volume CR eventually Signed-off-by: Shi, Crane * sync driveCR's storage-group label to AC Signed-off-by: Shi, Crane * fix UT Signed-off-by: Shi, Crane * fix UT Signed-off-by: Shi, Crane * rename and extract the storagegroup label key as constants Signed-off-by: Shi, Crane * sync the storage-group label removal from drives to ACs Signed-off-by: Shi, Crane * remove sync storage-group label from drive to AC in capacitycontroller Signed-off-by: Shi, Crane * Revert "remove sync storage-group label from drive to AC in capacitycontroller" This reverts commit 7d570515c3a3d694c0dd9fd8aa60b3c5e4197250. Signed-off-by: Shi, Crane * Add StorageGroup Schema Definition and generated files Signed-off-by: Shi, Crane * refine StorageGroup definition Signed-off-by: Shi, Crane * Add storagegroup controller Signed-off-by: Shi, Crane * Add storagegroup crd to scheme Signed-off-by: Shi, Crane * handling of storagegroup creation Signed-off-by: Shi, Crane * remove sync storage-group label from drive to AC in capacitycontroller Signed-off-by: Shi, Crane * fix Signed-off-by: Shi, Crane * add debug log Signed-off-by: Shi, Crane * support storage-group deletion Signed-off-by: Shi, Crane * Handle invalidField and make MatchFields ANDed Signed-off-by: Shi, Crane * support NumberDrivesPerNode in DriveSelector Signed-off-by: Shi, Crane * fix & add log for noDriveSelected and invalidField Signed-off-by: Shi, Crane * refine log Signed-off-by: Shi, Crane * fix issue in noDriveSelected and numberDrivesPerNode support Signed-off-by: Shi, Crane * refine log Signed-off-by: Shi, Crane * fix matchFields issue on bool type Signed-off-by: Shi, Crane * fix the issue that drive with non-lvg volume may be selected Signed-off-by: Shi, Crane * fix golint Signed-off-by: Shi, Crane * fix golint Signed-off-by: Shi, Crane * refine handleStorageGroupCreation Signed-off-by: Shi, Crane * refine handleStorageGroupCreation Signed-off-by: Shi, Crane * refine to fix golint error Signed-off-by: Shi, Crane * fix golint Signed-off-by: Shi, Crane * refine log Signed-off-by: Shi, Crane * Change storagegroup print column Signed-off-by: Shi, Crane * support manual storage-group labeling on drive again Signed-off-by: Shi, Crane * only watch storagegroup delete event in storagegroup controller Signed-off-by: Shi, Crane * Refactor func to add/rm sg label and enhance manual sg labeling Signed-off-by: Shi, Crane * support to get ac for drive with lvg in drive sg label sync Signed-off-by: Shi, Crane * now also include drive del event for storagegroup controller Signed-off-by: Shi, Crane * fix log error Signed-off-by: Shi, Crane * fix that no del event of sg is caught Signed-off-by: Shi, Crane * refine log & add initial draft of func syncDriveOnAllStorageGroups Signed-off-by: Shi, Crane * support simple status tracking of storagegroup Signed-off-by: 
Shi, Crane * add support of storage group label sync on new drive creation Signed-off-by: Shi, Crane * fix lint error Signed-off-by: Shi, Crane * fix lint error Signed-off-by: Shi, Crane * service procedure only supports storage group whose numDrivesPerNode is 0 Signed-off-by: Shi, Crane * fix sg nil annotation panic error Signed-off-by: Shi, Crane * service procedure support of storage group whose numDrivesPerNode>0 Signed-off-by: Shi, Crane * fix go lint error Signed-off-by: Shi, Crane * fix issue in reconcile of sg with numberDrivesPerNode>0 Signed-off-by: Shi, Crane * refine logic for reconcile of sg with numDrivesPerNode>0 Signed-off-by: Shi, Crane * sg label restore on drive selected by sg with numDrivesPerNode>0 Signed-off-by: Shi, Crane * refine the logic to sync drive label on sg with numDrivesPerNode>0 Signed-off-by: Shi, Crane * Revert syncing storage group label to LVG CR if necessary Signed-off-by: Shi, Crane * fix error in log Signed-off-by: Shi, Crane * add more log for adding sg label to drive Signed-off-by: Shi, Crane * refine log Signed-off-by: Shi, Crane * refine code comment and increase UT coverage Signed-off-by: Shi, Crane * try increase UT coverage Signed-off-by: Shi, Crane * resolve review comments Signed-off-by: Shi, Crane * refine storagegroupcontroller updateEventFilter & nil labels case handle Signed-off-by: Shi, Crane * try fix golint Signed-off-by: Shi, Crane * try to fix golint Signed-off-by: Shi, Crane * fix devkit base image tag to 15.4 to resolve devkit image build failure Signed-off-by: Shi, Crane * nil map can still work for map entry read Signed-off-by: Shi, Crane * refine error handling and error log Signed-off-by: Shi, Crane * add comments to main function Signed-off-by: Shi, Crane * refine error messages for labeling and label removal in sg handling Signed-off-by: Shi, Crane * remove some deprecated bool flags Signed-off-by: Shi, Crane * sync storage group label to LVG CR if necessary Signed-off-by: Shi, Crane * refine sg ctrl's drive update event predicate to include drive removal Signed-off-by: Shi, Crane * still only focus on drive sg label update event Signed-off-by: Shi, Crane --------- Signed-off-by: Shi, Crane --- Makefile | 2 + api/generated/v1/types.pb.go | 220 +++++-- api/v1/constants.go | 3 + api/v1/storagegroupcrd/groupversion_info.go | 38 ++ api/v1/storagegroupcrd/storagegroup_types.go | 58 ++ .../storagegroupcrd/zz_generated.deepcopy.go | 60 ++ api/v1/types.proto | 11 + cmd/controller/main.go | 12 + devkit/Dockerfile | 2 +- pkg/base/capacityplanner/node_capacity.go | 2 +- pkg/base/k8s/kubeclient.go | 19 +- pkg/base/k8s/kubeclient_test.go | 15 +- pkg/common/ac_operations.go | 6 +- pkg/common/volume_operations.go | 9 +- pkg/common/volume_operations_test.go | 7 +- .../reservation/reservationcontroller.go | 2 +- .../storagegroup/storagegroupcontroller.go | 613 ++++++++++++++++++ pkg/node/volumemgr.go | 2 +- pkg/node/volumemgr_test.go | 2 +- pkg/scheduler/extender/extender.go | 5 +- 20 files changed, 1017 insertions(+), 71 deletions(-) create mode 100644 api/v1/storagegroupcrd/groupversion_info.go create mode 100644 api/v1/storagegroupcrd/storagegroup_types.go create mode 100644 api/v1/storagegroupcrd/zz_generated.deepcopy.go create mode 100644 pkg/crcontrollers/storagegroup/storagegroupcontroller.go diff --git a/Makefile b/Makefile index 02100f5fa..b74d9a1a3 100644 --- a/Makefile +++ b/Makefile @@ -112,6 +112,7 @@ generate-deepcopy: controller-gen object paths=api/v1/drivecrd/drive_types.go paths=api/v1/drivecrd/groupversion_info.go
output:dir=api/v1/drivecrd controller-gen object paths=api/v1/lvgcrd/logicalvolumegroup_types.go paths=api/v1/lvgcrd/groupversion_info.go output:dir=api/v1/lvgcrd controller-gen object paths=api/v1/nodecrd/node_types.go paths=api/v1/nodecrd/groupversion_info.go output:dir=api/v1/nodecrd + controller-gen object paths=api/v1/storagegroupcrd/storagegroup_types.go paths=api/v1/storagegroupcrd/groupversion_info.go output:dir=api/v1/storagegroupcrd generate-baremetal-crds: install-controller-gen controller-gen $(CRD_OPTIONS) paths=api/v1/availablecapacitycrd/availablecapacity_types.go paths=api/v1/availablecapacitycrd/groupversion_info.go output:crd:dir=$(CSI_CHART_CRDS_PATH) @@ -120,6 +121,7 @@ generate-baremetal-crds: install-controller-gen controller-gen $(CRD_OPTIONS) paths=api/v1/drivecrd/drive_types.go paths=api/v1/drivecrd/groupversion_info.go output:crd:dir=$(CSI_CHART_CRDS_PATH) controller-gen $(CRD_OPTIONS) paths=api/v1/lvgcrd/logicalvolumegroup_types.go paths=api/v1/lvgcrd/groupversion_info.go output:crd:dir=$(CSI_CHART_CRDS_PATH) controller-gen $(CRD_OPTIONS) paths=api/v1/nodecrd/node_types.go paths=api/v1/nodecrd/groupversion_info.go output:crd:dir=$(CSI_CHART_CRDS_PATH) + controller-gen $(CRD_OPTIONS) paths=api/v1/storagegroupcrd/storagegroup_types.go paths=api/v1/storagegroupcrd/groupversion_info.go output:crd:dir=$(CSI_CHART_CRDS_PATH) generate-api: compile-proto generate-baremetal-crds generate-deepcopy diff --git a/api/generated/v1/types.pb.go b/api/generated/v1/types.pb.go index a4566b19c..4ebaa5bf7 100644 --- a/api/generated/v1/types.pb.go +++ b/api/generated/v1/types.pb.go @@ -245,6 +245,7 @@ type Volume struct { Usage string `protobuf:"bytes,13,opt,name=Usage,proto3" json:"Usage,omitempty"` // inline volumes are not support anymore. 
need to remove field in the next version Ephemeral bool `protobuf:"varint,14,opt,name=Ephemeral,proto3" json:"Ephemeral,omitempty"` + StorageGroup string `protobuf:"bytes,15,opt,name=StorageGroup,proto3" json:"StorageGroup,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -373,6 +374,13 @@ func (m *Volume) GetEphemeral() bool { return false } +func (m *Volume) GetStorageGroup() string { + if m != nil { + return m.StorageGroup + } + return "" +} + type AvailableCapacity struct { Location string `protobuf:"bytes,1,opt,name=Location,proto3" json:"Location,omitempty"` NodeId string `protobuf:"bytes,2,opt,name=NodeId,proto3" json:"NodeId,omitempty"` @@ -601,6 +609,7 @@ type CapacityRequest struct { Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` StorageClass string `protobuf:"bytes,2,opt,name=StorageClass,proto3" json:"StorageClass,omitempty"` Size int64 `protobuf:"varint,3,opt,name=Size,proto3" json:"Size,omitempty"` + StorageGroup string `protobuf:"bytes,4,opt,name=StorageGroup,proto3" json:"StorageGroup,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -652,6 +661,13 @@ func (m *CapacityRequest) GetSize() int64 { return 0 } +func (m *CapacityRequest) GetStorageGroup() string { + if m != nil { + return m.StorageGroup + } + return "" +} + type LogicalVolumeGroup struct { Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` Node string `protobuf:"bytes,2,opt,name=Node,proto3" json:"Node,omitempty"` @@ -787,6 +803,92 @@ func (m *Node) GetAddresses() map[string]string { return nil } +type StorageGroupSpec struct { + DriveSelector *DriveSelector `protobuf:"bytes,1,opt,name=driveSelector,proto3" json:"driveSelector,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StorageGroupSpec) Reset() { *m = StorageGroupSpec{} } +func (m *StorageGroupSpec) String() string { return proto.CompactTextString(m) } +func (*StorageGroupSpec) ProtoMessage() {} +func (*StorageGroupSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{9} +} + +func (m *StorageGroupSpec) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StorageGroupSpec.Unmarshal(m, b) +} +func (m *StorageGroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StorageGroupSpec.Marshal(b, m, deterministic) +} +func (m *StorageGroupSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageGroupSpec.Merge(m, src) +} +func (m *StorageGroupSpec) XXX_Size() int { + return xxx_messageInfo_StorageGroupSpec.Size(m) +} +func (m *StorageGroupSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StorageGroupSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageGroupSpec proto.InternalMessageInfo + +func (m *StorageGroupSpec) GetDriveSelector() *DriveSelector { + if m != nil { + return m.DriveSelector + } + return nil +} + +type DriveSelector struct { + NumberDrivesPerNode int32 `protobuf:"varint,1,opt,name=numberDrivesPerNode,proto3" json:"numberDrivesPerNode,omitempty"` + MatchFields map[string]string `protobuf:"bytes,2,rep,name=matchFields,proto3" json:"matchFields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DriveSelector) 
Reset() { *m = DriveSelector{} } +func (m *DriveSelector) String() string { return proto.CompactTextString(m) } +func (*DriveSelector) ProtoMessage() {} +func (*DriveSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{10} +} + +func (m *DriveSelector) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DriveSelector.Unmarshal(m, b) +} +func (m *DriveSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DriveSelector.Marshal(b, m, deterministic) +} +func (m *DriveSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DriveSelector.Merge(m, src) +} +func (m *DriveSelector) XXX_Size() int { + return xxx_messageInfo_DriveSelector.Size(m) +} +func (m *DriveSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DriveSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DriveSelector proto.InternalMessageInfo + +func (m *DriveSelector) GetNumberDrivesPerNode() int32 { + if m != nil { + return m.NumberDrivesPerNode + } + return 0 +} + +func (m *DriveSelector) GetMatchFields() map[string]string { + if m != nil { + return m.MatchFields + } + return nil +} + func init() { proto.RegisterType((*Drive)(nil), "v1api.Drive") proto.RegisterType((*Volume)(nil), "v1api.Volume") @@ -798,63 +900,73 @@ func init() { proto.RegisterType((*LogicalVolumeGroup)(nil), "v1api.LogicalVolumeGroup") proto.RegisterType((*Node)(nil), "v1api.Node") proto.RegisterMapType((map[string]string)(nil), "v1api.Node.AddressesEntry") + proto.RegisterType((*StorageGroupSpec)(nil), "v1api.StorageGroupSpec") + proto.RegisterType((*DriveSelector)(nil), "v1api.DriveSelector") + proto.RegisterMapType((map[string]string)(nil), "v1api.DriveSelector.MatchFieldsEntry") } func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 845 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x55, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x06, 0xa9, 0x1f, 0x5b, 0x23, 0xc7, 0x89, 0xd7, 0xa9, 0xb1, 0x35, 0x8c, 0x42, 0xe0, 0xa1, - 0xf0, 0x21, 0x10, 0x50, 0xf5, 0xd0, 0xa0, 0xe8, 0xa1, 0xb1, 0xa4, 0x36, 0x44, 0x53, 0x47, 0xa0, - 0xea, 0x1c, 0x0a, 0xf4, 0xb0, 0x26, 0xa7, 0x36, 0x51, 0x4a, 0x64, 0x77, 0x49, 0x05, 0xf4, 0xa5, - 0x7d, 0x86, 0x3e, 0x50, 0x5f, 0xa2, 0xb7, 0x3e, 0x4d, 0x30, 0xbb, 0x2b, 0xfe, 0x48, 0xba, 0xcd, - 0x7c, 0x33, 0xb3, 0x33, 0xfc, 0xf6, 0x1b, 0x2e, 0x0c, 0xf3, 0x32, 0x43, 0x35, 0xce, 0x64, 0x9a, - 0xa7, 0xac, 0xb7, 0xf9, 0x4a, 0x64, 0xb1, 0xf7, 0x5f, 0x17, 0x7a, 0x33, 0x19, 0x6f, 0x90, 0x31, - 0xe8, 0xde, 0xdd, 0xf9, 0x33, 0xee, 0x8c, 0x9c, 0xeb, 0x41, 0xa0, 0x6d, 0xf6, 0x02, 0x3a, 0x1f, - 0xfc, 0x19, 0x77, 0x35, 0x44, 0x26, 0x21, 0x0b, 0x7f, 0xc6, 0x3b, 0x06, 0x59, 0xf8, 0x33, 0xe6, - 0xc1, 0xc9, 0x12, 0x65, 0x2c, 0x92, 0xdb, 0x62, 0x75, 0x8f, 0x92, 0x77, 0x75, 0xa8, 0x85, 0xb1, - 0x0b, 0xe8, 0xbf, 0x45, 0x91, 0xe4, 0x8f, 0xbc, 0xa7, 0xa3, 0xd6, 0xa3, 0x9e, 0xbf, 0x94, 0x19, - 0xf2, 0xbe, 0xe9, 0x49, 0x36, 0x61, 0xcb, 0xf8, 0x09, 0xf9, 0xd1, 0xc8, 0xb9, 0xee, 0x04, 0xda, - 0xa6, 0xfa, 0x65, 0x2e, 0xf2, 0x42, 0xf1, 0x63, 0x53, 0x6f, 0x3c, 0xf6, 0x12, 0x7a, 0x77, 0x4a, - 0x3c, 0x20, 0x1f, 0x68, 0xd8, 0x38, 0x94, 0x7d, 0x9b, 0x46, 0xe8, 0x47, 0x1c, 0x4c, 0xb6, 0xf1, - 0xe8, 0xe4, 0x85, 0xc8, 0x1f, 0xf9, 0xd0, 0x74, 0x23, 0x9b, 0x5d, 0xc1, 0x60, 0xbe, 0x0e, 0x93, - 0x54, 0x15, 0x12, 0xf9, 0x89, 0x0e, 0xd4, 0x80, 0x9e, 0x25, 0x49, 0x73, 0xfe, 0xcc, 0x54, 0x90, - 0x4d, 0x0c, 0xdc, 0x88, 0x92, 0x9f, 0x1a, 0x06, 0x6e, 0x44, 0xc9, 0x2e, 0xe1, 
0xf8, 0x87, 0x58, - 0xae, 0x3e, 0x0a, 0x89, 0xfc, 0xb9, 0x86, 0x2b, 0xdf, 0x9c, 0x1f, 0x15, 0x52, 0xac, 0x43, 0xe4, - 0x2f, 0xf4, 0x27, 0xd5, 0x00, 0x55, 0xbe, 0x9b, 0xcf, 0xe8, 0x63, 0x90, 0x9f, 0x99, 0xca, 0xad, - 0x4f, 0x31, 0x5f, 0x2d, 0x4b, 0x95, 0xe3, 0x8a, 0xb3, 0x91, 0x73, 0x7d, 0x1c, 0x54, 0x3e, 0xe3, - 0x70, 0xe4, 0xab, 0x69, 0x82, 0x62, 0xcd, 0xcf, 0x75, 0x68, 0xeb, 0xb2, 0x2f, 0xe1, 0x74, 0x89, - 0x61, 0x21, 0xe3, 0xbc, 0xb4, 0x8c, 0xbd, 0xd4, 0xe7, 0xee, 0xa0, 0xec, 0x15, 0x9c, 0xcd, 0xd7, - 0xa1, 0x2c, 0xb3, 0x3c, 0x4e, 0xd7, 0x53, 0x91, 0x89, 0xfb, 0x04, 0xf9, 0x67, 0x3a, 0x75, 0x3f, - 0xc0, 0xc6, 0xc0, 0x6a, 0x70, 0x41, 0xfa, 0x09, 0xd3, 0x84, 0x5f, 0xe8, 0xf4, 0x03, 0x11, 0xef, - 0xef, 0x0e, 0xf4, 0x3f, 0xa4, 0x49, 0xb1, 0x42, 0x76, 0x0a, 0xae, 0x1f, 0x59, 0x51, 0xb9, 0x7e, - 0xa4, 0x3f, 0x39, 0x0d, 0x05, 0xa5, 0x5b, 0x5d, 0x55, 0x3e, 0x49, 0x69, 0x6b, 0x6b, 0x59, 0x18, - 0x95, 0xb5, 0x30, 0x2d, 0xb7, 0x3c, 0x95, 0xe2, 0x01, 0xa7, 0x89, 0x50, 0xaa, 0x92, 0x5b, 0x03, - 0x6b, 0x08, 0xa0, 0xd7, 0x12, 0xc0, 0x05, 0xf4, 0xdf, 0x7f, 0x5c, 0xa3, 0x54, 0xbc, 0x3f, 0xea, - 0x10, 0x6e, 0xbc, 0x83, 0x92, 0x63, 0xd0, 0xfd, 0x39, 0x8d, 0xd0, 0x0a, 0x4e, 0xdb, 0x95, 0x5c, - 0x07, 0x0d, 0xb9, 0xd6, 0xd2, 0x86, 0x96, 0xb4, 0x5f, 0xc1, 0xd9, 0xfb, 0x0c, 0xa5, 0x1e, 0x5c, - 0x24, 0xf6, 0x2e, 0x8c, 0xf2, 0xf6, 0x03, 0x24, 0x93, 0xe9, 0xd2, 0xb7, 0x59, 0x56, 0x86, 0x15, - 0x50, 0xcb, 0xfc, 0x59, 0x53, 0xe6, 0x24, 0xad, 0xec, 0x11, 0x57, 0x28, 0x45, 0xa2, 0xe5, 0x78, - 0x1c, 0xd4, 0x80, 0xf7, 0x17, 0x9c, 0xbd, 0xd9, 0x88, 0x38, 0xa1, 0xfb, 0xa3, 0x6b, 0x0c, 0xe3, - 0xbc, 0x6c, 0x91, 0xef, 0xec, 0x90, 0x5f, 0x93, 0xe6, 0xb6, 0x48, 0xf3, 0xe0, 0x44, 0x35, 0x09, - 0xb7, 0x97, 0xd2, 0xc4, 0x2a, 0x02, 0xbb, 0x35, 0x81, 0xde, 0xff, 0x0e, 0x5c, 0xed, 0x4d, 0x10, - 0xa0, 0x42, 0xb9, 0x31, 0x0d, 0xaf, 0x60, 0x70, 0x2b, 0x56, 0xa8, 0x32, 0x11, 0xa2, 0x9d, 0xa6, - 0x06, 0x1a, 0x2b, 0xef, 0xb6, 0x56, 0xfe, 0x1b, 0x38, 0xa1, 0xc1, 0x02, 0xfc, 0xb3, 0x40, 0x95, - 0x9b, 0x71, 0x86, 0x93, 0xf3, 0xb1, 0xfe, 0x9d, 0x8d, 0x9b, 0xa1, 0xa0, 0x95, 0xc8, 0x7e, 0x82, - 0xf3, 0x46, 0xf7, 0xaa, 0xbe, 0x3b, 0xea, 0x5c, 0x0f, 0x27, 0x9f, 0xdb, 0xfa, 0xfd, 0x8c, 0xe0, - 0x50, 0x95, 0xf7, 0xb6, 0x3d, 0x05, 0x7d, 0x8b, 0xb5, 0x91, 0xc4, 0x4e, 0xe2, 0xaa, 0x01, 0xa2, - 0xdd, 0x1c, 0x82, 0x44, 0x2e, 0x05, 0x2b, 0xdf, 0x7b, 0x02, 0xb6, 0xdf, 0x80, 0x7d, 0x0f, 0xcf, - 0x6b, 0xca, 0x34, 0xa4, 0x19, 0x1a, 0x4e, 0x2e, 0xec, 0xa0, 0x3b, 0xd1, 0x60, 0x37, 0x9d, 0xae, - 0xad, 0x71, 0xae, 0xb2, 0x7d, 0x5b, 0x98, 0xf7, 0xdb, 0x5e, 0x17, 0xba, 0x49, 0xba, 0x83, 0xed, - 0x2b, 0x40, 0xf6, 0xde, 0xca, 0xb9, 0x07, 0x56, 0x6e, 0xab, 0x80, 0x4e, 0x43, 0x01, 0xff, 0x3a, - 0xc0, 0xde, 0xa5, 0x0f, 0x71, 0x28, 0x12, 0xf3, 0x33, 0xf8, 0x51, 0xa6, 0x45, 0x76, 0xb0, 0x05, - 0x61, 0xb4, 0x6d, 0xae, 0xc5, 0x68, 0xdb, 0xae, 0x60, 0xb0, 0x15, 0x27, 0x5d, 0xb3, 0xe6, 0xb4, - 0x02, 0x0e, 0x49, 0x8e, 0x7d, 0x01, 0x60, 0x1a, 0x05, 0xf8, 0xbb, 0xe2, 0x3d, 0x5d, 0xd2, 0x40, - 0x1a, 0x9a, 0xea, 0xb7, 0x34, 0x55, 0xef, 0xf0, 0x51, 0x73, 0x87, 0xbd, 0x7f, 0x1c, 0x33, 0xd6, - 0xc1, 0xb7, 0xf1, 0x35, 0x0c, 0xde, 0x44, 0x91, 0x44, 0xa5, 0xd0, 0xb0, 0x3b, 0x9c, 0x5c, 0x36, - 0x54, 0x38, 0xae, 0x82, 0xf3, 0x75, 0x2e, 0xcb, 0xa0, 0x4e, 0xbe, 0xfc, 0x0e, 0x4e, 0xdb, 0x41, - 0x7a, 0x53, 0xfe, 0xc0, 0xd2, 0x1e, 0x4f, 0x26, 0xad, 0xfc, 0x46, 0x24, 0xc5, 0x96, 0x11, 0xe3, - 0x7c, 0xeb, 0xbe, 0x76, 0x6e, 0x8e, 0x7e, 0x35, 0x4f, 0xf7, 0x7d, 0x5f, 0x3f, 0xe4, 0x5f, 0x7f, - 0x0a, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x28, 0x73, 0x3e, 0xd7, 0x07, 0x00, 0x00, + // 954 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcd, 0x6e, 0xe4, 0x44, + 0x10, 0x96, 0xe7, 0x2f, 0x99, 0x9a, 0x24, 0x9b, 0x74, 0x42, 0xd4, 0x44, 0x11, 0x8a, 0x2c, 0x81, + 0x72, 0x58, 0x8d, 0x20, 0x1c, 0x58, 0xad, 0x10, 0x62, 0x93, 0xc9, 0xee, 0x5a, 0xec, 0x66, 0x23, + 0x0f, 0xd9, 0x03, 0xb7, 0x8e, 0x5d, 0x24, 0x16, 0x9e, 0xb1, 0xe9, 0xf6, 0xcc, 0xca, 0x7b, 0x41, + 0xbc, 0x02, 0xcf, 0xc0, 0x73, 0xf0, 0x00, 0x5c, 0xb9, 0xf1, 0x34, 0xa8, 0xba, 0x7b, 0xec, 0xf6, + 0x8c, 0x2f, 0xdc, 0xaa, 0xbe, 0xaa, 0xea, 0x2a, 0x7f, 0xf5, 0xb9, 0x6d, 0x18, 0x15, 0x65, 0x8e, + 0x6a, 0x9c, 0xcb, 0xac, 0xc8, 0x58, 0x7f, 0xf9, 0x95, 0xc8, 0x13, 0xff, 0x9f, 0x1e, 0xf4, 0x27, + 0x32, 0x59, 0x22, 0x63, 0xd0, 0xbb, 0xbb, 0x0b, 0x26, 0xdc, 0x3b, 0xf3, 0xce, 0x87, 0xa1, 0xb6, + 0xd9, 0x3e, 0x74, 0xdf, 0x07, 0x13, 0xde, 0xd1, 0x10, 0x99, 0x84, 0xdc, 0x06, 0x13, 0xde, 0x35, + 0xc8, 0x6d, 0x30, 0x61, 0x3e, 0xec, 0x4c, 0x51, 0x26, 0x22, 0xbd, 0x59, 0xcc, 0xee, 0x51, 0xf2, + 0x9e, 0x0e, 0x35, 0x30, 0x76, 0x0c, 0x83, 0xd7, 0x28, 0xd2, 0xe2, 0x91, 0xf7, 0x75, 0xd4, 0x7a, + 0xd4, 0xf3, 0xc7, 0x32, 0x47, 0x3e, 0x30, 0x3d, 0xc9, 0x26, 0x6c, 0x9a, 0x7c, 0x44, 0xbe, 0x75, + 0xe6, 0x9d, 0x77, 0x43, 0x6d, 0x53, 0xfd, 0xb4, 0x10, 0xc5, 0x42, 0xf1, 0x6d, 0x53, 0x6f, 0x3c, + 0x76, 0x04, 0xfd, 0x3b, 0x25, 0x1e, 0x90, 0x0f, 0x35, 0x6c, 0x1c, 0xca, 0xbe, 0xc9, 0x62, 0x0c, + 0x62, 0x0e, 0x26, 0xdb, 0x78, 0x74, 0xf2, 0xad, 0x28, 0x1e, 0xf9, 0xc8, 0x74, 0x23, 0x9b, 0x9d, + 0xc2, 0xf0, 0x7a, 0x1e, 0xa5, 0x99, 0x5a, 0x48, 0xe4, 0x3b, 0x3a, 0x50, 0x03, 0x7a, 0x96, 0x34, + 0x2b, 0xf8, 0xae, 0xa9, 0x20, 0x9b, 0x18, 0xb8, 0x14, 0x25, 0xdf, 0x33, 0x0c, 0x5c, 0x8a, 0x92, + 0x9d, 0xc0, 0xf6, 0xcb, 0x44, 0xce, 0x3e, 0x08, 0x89, 0xfc, 0x89, 0x86, 0x2b, 0xdf, 0x9c, 0x1f, + 0x2f, 0xa4, 0x98, 0x47, 0xc8, 0xf7, 0xf5, 0x23, 0xd5, 0x00, 0x55, 0xbe, 0xb9, 0x9e, 0xd0, 0xc3, + 0x20, 0x3f, 0x30, 0x95, 0x2b, 0x9f, 0x62, 0x81, 0x9a, 0x96, 0xaa, 0xc0, 0x19, 0x67, 0x67, 0xde, + 0xf9, 0x76, 0x58, 0xf9, 0x8c, 0xc3, 0x56, 0xa0, 0xae, 0x52, 0x14, 0x73, 0x7e, 0xa8, 0x43, 0x2b, + 0x97, 0x7d, 0x01, 0x7b, 0x53, 0x8c, 0x16, 0x32, 0x29, 0x4a, 0xcb, 0xd8, 0x91, 0x3e, 0x77, 0x0d, + 0x65, 0x4f, 0xe1, 0xe0, 0x7a, 0x1e, 0xc9, 0x32, 0x2f, 0x92, 0x6c, 0x7e, 0x25, 0x72, 0x71, 0x9f, + 0x22, 0xff, 0x44, 0xa7, 0x6e, 0x06, 0xd8, 0x18, 0x58, 0x0d, 0xde, 0x92, 0x7e, 0xa2, 0x2c, 0xe5, + 0xc7, 0x3a, 0xbd, 0x25, 0xe2, 0xff, 0xd9, 0x85, 0xc1, 0xfb, 0x2c, 0x5d, 0xcc, 0x90, 0xed, 0x41, + 0x27, 0x88, 0xad, 0xa8, 0x3a, 0x41, 0xac, 0x1f, 0x39, 0x8b, 0x04, 0xa5, 0x5b, 0x5d, 0x55, 0x3e, + 0x49, 0x69, 0x65, 0x6b, 0x59, 0x18, 0x95, 0x35, 0x30, 0x2d, 0xb7, 0x22, 0x93, 0xe2, 0x01, 0xaf, + 0x52, 0xa1, 0x54, 0x25, 0x37, 0x07, 0x73, 0x04, 0xd0, 0x6f, 0x08, 0xe0, 0x18, 0x06, 0xef, 0x3e, + 0xcc, 0x51, 0x2a, 0x3e, 0x38, 0xeb, 0x12, 0x6e, 0xbc, 0x56, 0xc9, 0x31, 0xe8, 0xbd, 0xcd, 0x62, + 0xb4, 0x82, 0xd3, 0x76, 0x25, 0xd7, 0xa1, 0x23, 0xd7, 0x5a, 0xda, 0xd0, 0x90, 0xf6, 0x53, 0x38, + 0x78, 0x97, 0xa3, 0xd4, 0x83, 0x8b, 0xd4, 0xee, 0xc2, 0x28, 0x6f, 0x33, 0x40, 0x32, 0xb9, 0x9a, + 0x06, 0x36, 0xcb, 0xca, 0xb0, 0x02, 0x6a, 0x99, 0xef, 0xba, 0x32, 0x27, 0x69, 0xe5, 0x8f, 0x38, + 0x43, 0x29, 0x52, 0x2d, 0xc7, 0xed, 0xb0, 0x06, 0x1c, 0x9e, 0x5e, 0xc9, 0x6c, 0x91, 0x5b, 0x61, + 0x36, 0x30, 0xff, 0x37, 0x38, 0x78, 0xb1, 0x14, 0x49, 0x4a, 0x3b, 0xa6, 0x55, 0x47, 0x49, 0x51, + 0x36, 0x16, 0xe4, 0xad, 0x2d, 0xa8, 0x26, 0xb6, 0xd3, 0x20, 0xd6, 0x87, 0x1d, 0xe5, 0x2e, 0xc5, + 0x2e, 0xce, 0xc5, 0x2a, 0x92, 0x7b, 0x35, 0xc9, 0xfe, 0xbf, 0x1e, 0x9c, 0x6e, 0x4c, 0x10, 
0xa2, + 0x42, 0xb9, 0x34, 0x0d, 0x4f, 0x61, 0x78, 0x23, 0x66, 0xa8, 0x72, 0x11, 0xa1, 0x9d, 0xa6, 0x06, + 0x9c, 0x6b, 0xa1, 0xd3, 0xb8, 0x16, 0xbe, 0x81, 0x1d, 0x1a, 0x2c, 0xc4, 0x5f, 0x17, 0xa8, 0x0a, + 0x33, 0xce, 0xe8, 0xe2, 0x70, 0xac, 0xaf, 0xbc, 0xb1, 0x1b, 0x0a, 0x1b, 0x89, 0xec, 0x07, 0x38, + 0x74, 0xba, 0x57, 0xf5, 0xbd, 0xb3, 0xee, 0xf9, 0xe8, 0xe2, 0x53, 0x5b, 0xbf, 0x99, 0x11, 0xb6, + 0x55, 0xf9, 0xaf, 0x9b, 0x53, 0xd0, 0xb3, 0x58, 0x1b, 0xe9, 0x85, 0x20, 0x01, 0xd6, 0x00, 0xd1, + 0x6e, 0x0e, 0x41, 0x22, 0x97, 0x82, 0x95, 0xef, 0x7f, 0x04, 0xb6, 0xd9, 0x80, 0x7d, 0x0f, 0x4f, + 0x6a, 0xca, 0x34, 0xa4, 0x19, 0x1a, 0x5d, 0x1c, 0xdb, 0x41, 0xd7, 0xa2, 0xe1, 0x7a, 0x3a, 0xad, + 0xcd, 0x39, 0x57, 0xd9, 0xbe, 0x0d, 0xcc, 0xff, 0xdd, 0xdb, 0x68, 0x43, 0xab, 0xa4, 0x25, 0xac, + 0x3e, 0x15, 0x64, 0x6f, 0xbc, 0x97, 0x9d, 0x96, 0xf7, 0x72, 0x25, 0x81, 0xae, 0xf3, 0x9e, 0xad, + 0xeb, 0xb4, 0xd7, 0xa2, 0xd3, 0xbf, 0x3c, 0x60, 0x6f, 0xb2, 0x87, 0x24, 0x12, 0xa9, 0xb9, 0x55, + 0x34, 0xdc, 0x3a, 0x06, 0x61, 0xf4, 0xda, 0x76, 0x2c, 0x46, 0xaf, 0xed, 0x29, 0x0c, 0x57, 0x0a, + 0x26, 0x2d, 0x68, 0xe2, 0x2b, 0xa0, 0x4d, 0x97, 0xec, 0x33, 0x00, 0xd3, 0x28, 0xc4, 0x9f, 0x15, + 0xef, 0xeb, 0x12, 0x07, 0x71, 0x84, 0x37, 0x68, 0x08, 0xaf, 0xbe, 0x0c, 0xb6, 0xdc, 0xcb, 0xc0, + 0xff, 0xc3, 0x33, 0x63, 0xb5, 0x7e, 0x64, 0x9f, 0xc1, 0xf0, 0x45, 0x1c, 0x4b, 0x54, 0x0a, 0xcd, + 0x0a, 0x46, 0x17, 0x27, 0x8e, 0x54, 0xc7, 0x55, 0xf0, 0x7a, 0x5e, 0xc8, 0x32, 0xac, 0x93, 0x4f, + 0xbe, 0x85, 0xbd, 0x66, 0x90, 0x3e, 0x4e, 0xbf, 0x60, 0x69, 0x8f, 0x27, 0x93, 0xee, 0x8e, 0xa5, + 0x48, 0x17, 0x2b, 0x46, 0x8c, 0xf3, 0xbc, 0xf3, 0xcc, 0xf3, 0x6f, 0x60, 0xdf, 0x65, 0x79, 0x9a, + 0x63, 0xc4, 0x9e, 0xc3, 0x6e, 0x4c, 0x7f, 0x03, 0x53, 0x4c, 0x31, 0x2a, 0x32, 0x69, 0x15, 0x75, + 0x64, 0xe7, 0x99, 0xb8, 0xb1, 0xb0, 0x99, 0xea, 0xff, 0xed, 0xc1, 0x6e, 0x23, 0x81, 0x7d, 0x09, + 0x87, 0x73, 0xfd, 0x03, 0xa0, 0x61, 0x75, 0x8b, 0x52, 0xef, 0x86, 0xce, 0xec, 0x87, 0x6d, 0x21, + 0xf6, 0x0a, 0x46, 0x33, 0x51, 0x44, 0x8f, 0x2f, 0x13, 0x4c, 0xe3, 0x15, 0x1b, 0x9f, 0xb7, 0x75, + 0x1f, 0xbf, 0xad, 0xf3, 0x0c, 0x31, 0x6e, 0xe5, 0xc9, 0x77, 0xb0, 0xbf, 0x9e, 0xf0, 0x7f, 0xc8, + 0xb9, 0xdc, 0xfa, 0xc9, 0xfc, 0x20, 0xdd, 0x0f, 0xf4, 0xef, 0xd2, 0xd7, 0xff, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xbd, 0x19, 0x44, 0x54, 0x3d, 0x09, 0x00, 0x00, } diff --git a/api/v1/constants.go b/api/v1/constants.go index f0bac4ab7..13d2a0375 100644 --- a/api/v1/constants.go +++ b/api/v1/constants.go @@ -138,4 +138,7 @@ const ( // CSI Drive taint-like label key and value DriveTaintKey = "drive.csi-baremetal.dell.com/taint" DriveTaintValue = "NoSchedule" + + // CSI StorageGroup label key + StorageGroupLabelKey = "drive.csi-baremetal.dell.com/storage-group" ) diff --git a/api/v1/storagegroupcrd/groupversion_info.go b/api/v1/storagegroupcrd/groupversion_info.go new file mode 100644 index 000000000..3c4736aa2 --- /dev/null +++ b/api/v1/storagegroupcrd/groupversion_info.go @@ -0,0 +1,38 @@ +/* +Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sgcrd contains API Schema definitions for the StorageGroup v1 API group +// +groupName=csi-baremetal.dell.com +// +versionName=v1 +package sgcrd + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + crScheme "sigs.k8s.io/controller-runtime/pkg/scheme" + + v1 "github.com/dell/csi-baremetal/api/v1" +) + +var ( + // GroupVersionStorageGroup is group version used to register these objects + GroupVersionStorageGroup = schema.GroupVersion{Group: v1.CSICRsGroupVersion, Version: v1.Version} + + // SchemeBuilderStorageGroup is used to add go types to the GroupVersionKind scheme + SchemeBuilderStorageGroup = &crScheme.Builder{GroupVersion: GroupVersionStorageGroup} + + // AddToSchemeStorageGroup adds the types in this group-version to the given scheme. + AddToSchemeStorageGroup = SchemeBuilderStorageGroup.AddToScheme +) diff --git a/api/v1/storagegroupcrd/storagegroup_types.go b/api/v1/storagegroupcrd/storagegroup_types.go new file mode 100644 index 000000000..779e0c935 --- /dev/null +++ b/api/v1/storagegroupcrd/storagegroup_types.go @@ -0,0 +1,58 @@ +/* +Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sgcrd + +import ( + api "github.com/dell/csi-baremetal/api/generated/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:object:root=true + +// StorageGroup is the Schema for the StorageGroups API +// +kubebuilder:resource:scope=Cluster,shortName={sg,sgs} +// +kubebuilder:printcolumn:name="DRIVES_PER_NODE",type="string",JSONPath=".spec.driveSelector.numberDrivesPerNode",description="numberDrivesPerNode of StorageGroup's DriveSelector" +// +kubebuilder:printcolumn:name="TYPE",type="string",JSONPath=".spec.driveSelector.matchFields.Type",description="Drive Type of StorageGroup's DriveSelector" +// +kubebuilder:printcolumn:name="SLOT",type="string",JSONPath=".spec.driveSelector.matchFields.Slot",description="Drive Slot of StorageGroup's DriveSelector" +// +kubebuilder:printcolumn:name="PATH",type="string",JSONPath=".spec.driveSelector.matchFields.Path",description="Drive Path of StorageGroup's DriveSelector" +// +kubebuilder:printcolumn:name="SYSTEM",type="string",JSONPath=".spec.driveSelector.matchFields.IsSystem",description="Whether StorageGroup's DriveSelector selects the system drive" +type StorageGroup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec api.StorageGroupSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true + +// StorageGroupList contains a list of StorageGroup +//+kubebuilder:object:generate=true +type StorageGroupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StorageGroup `json:"items"` +} + +func init() { + SchemeBuilderStorageGroup.Register(&StorageGroup{}, &StorageGroupList{}) +} + +func (in *StorageGroup) DeepCopyInto(out *StorageGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec +} diff --git a/api/v1/storagegroupcrd/zz_generated.deepcopy.go b/api/v1/storagegroupcrd/zz_generated.deepcopy.go new file mode 100644 index 000000000..40ba2132d --- /dev/null +++ b/api/v1/storagegroupcrd/zz_generated.deepcopy.go @@ -0,0 +1,60 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package sgcrd + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGroup. +func (in *StorageGroup) DeepCopy() *StorageGroup { + if in == nil { + return nil + } + out := new(StorageGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageGroupList) DeepCopyInto(out *StorageGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageGroupList.
+func (in *StorageGroupList) DeepCopy() *StorageGroupList { + if in == nil { + return nil + } + out := new(StorageGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/api/v1/types.proto b/api/v1/types.proto index cb94568df..0ef31e1f0 100644 --- a/api/v1/types.proto +++ b/api/v1/types.proto @@ -46,6 +46,7 @@ message Volume { string Usage = 13; // inline volumes are not support anymore. need to remove field in the next version bool Ephemeral = 14; + string StorageGroup = 15; } message AvailableCapacity { @@ -80,6 +81,7 @@ message CapacityRequest { string Name = 1; string StorageClass = 2; int64 Size = 3; + string StorageGroup = 4; } message LogicalVolumeGroup { @@ -97,3 +99,12 @@ message Node { // key - address type, value - address, align with NodeAddress struct from k8s.io/api/core/v1 map<string, string> Addresses = 2; } + +message StorageGroupSpec { + DriveSelector driveSelector = 1; +} + +message DriveSelector { + int32 numberDrivesPerNode = 1; + map<string, string> matchFields = 2; +} diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 30df22753..dec52e4d2 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -40,6 +40,7 @@ import ( accrd "github.com/dell/csi-baremetal/api/v1/availablecapacitycrd" "github.com/dell/csi-baremetal/api/v1/drivecrd" "github.com/dell/csi-baremetal/api/v1/lvgcrd" + sgcrd "github.com/dell/csi-baremetal/api/v1/storagegroupcrd" "github.com/dell/csi-baremetal/api/v1/volumecrd" "github.com/dell/csi-baremetal/pkg/base" "github.com/dell/csi-baremetal/pkg/base/featureconfig" @@ -51,6 +52,7 @@ import ( "github.com/dell/csi-baremetal/pkg/controller" "github.com/dell/csi-baremetal/pkg/controller/capacitycontroller" "github.com/dell/csi-baremetal/pkg/crcontrollers/reservation" + "github.com/dell/csi-baremetal/pkg/crcontrollers/storagegroup" "github.com/dell/csi-baremetal/pkg/metrics" ) @@ -168,6 +170,10 @@ func createManager(ctx context.Context, client *k8s.KubeClient, log *logrus.Logg return nil, err } + if err := sgcrd.AddToSchemeStorageGroup(scheme); err != nil { + return nil, err + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, Namespace: *namespace, @@ -196,5 +202,11 @@ func createManager(ctx context.Context, client *k8s.KubeClient, log *logrus.Logg if err = capacityController.SetupWithManager(mgr); err != nil { return nil, err } + + storageGroupController := storagegroup.NewController(client, kubeCache, log) + if err = storageGroupController.SetupWithManager(mgr); err != nil { + return nil, err + } + return mgr, nil } diff --git a/devkit/Dockerfile b/devkit/Dockerfile index 5f7ee36f2..7f09a675d 100644 --- a/devkit/Dockerfile +++ b/devkit/Dockerfile @@ -10,7 +10,7 @@ RUN apt update \ && /kind/kind-build.sh /kind -FROM opensuse/leap:latest +FROM opensuse/leap:15.4 ARG arg_docker_ver ARG arg_go_ver diff --git a/pkg/base/capacityplanner/node_capacity.go b/pkg/base/capacityplanner/node_capacity.go index cf3106731..48df3f27f 100644 --- a/pkg/base/capacityplanner/node_capacity.go +++ b/pkg/base/capacityplanner/node_capacity.go @@ -154,7 +154,7 @@ func (nc *nodeCapacity) selectACForVolume(vol *genV1.Volume) *accrd.AvailableCap } for _, ac := range nc.acsOrder[vol.StorageClass] { - if requiredSize
<= nc.acs[ac].Spec.Size { + if requiredSize <= nc.acs[ac].Spec.Size && nc.acs[ac].Labels[v1.StorageGroupLabelKey] == vol.StorageGroup { // check if AC is reserved reservation, ok := nc.reservedACs[ac] diff --git a/pkg/base/k8s/kubeclient.go b/pkg/base/k8s/kubeclient.go index 2f2b0b023..25a7fb7de 100644 --- a/pkg/base/k8s/kubeclient.go +++ b/pkg/base/k8s/kubeclient.go @@ -39,6 +39,7 @@ import ( "github.com/dell/csi-baremetal/api/v1/drivecrd" "github.com/dell/csi-baremetal/api/v1/lvgcrd" "github.com/dell/csi-baremetal/api/v1/nodecrd" + sgcrd "github.com/dell/csi-baremetal/api/v1/storagegroupcrd" "github.com/dell/csi-baremetal/api/v1/volumecrd" "github.com/dell/csi-baremetal/pkg/base" checkErr "github.com/dell/csi-baremetal/pkg/base/error" @@ -227,7 +228,7 @@ func (k *KubeClient) ConstructACRCR(name string, apiACR api.AvailableCapacityRes // ConstructLVGCR constructs LogicalVolumeGroup custom resource from api.LogicalVolumeGroup struct // Receives a name for k8s ObjectMeta and an instance of api.LogicalVolumeGroup struct // Returns an instance of LogicalVolumeGroup CR struct -func (k *KubeClient) ConstructLVGCR(name string, apiLVG api.LogicalVolumeGroup) *lvgcrd.LogicalVolumeGroup { +func (k *KubeClient) ConstructLVGCR(name, storageGroup string, apiLVG api.LogicalVolumeGroup) *lvgcrd.LogicalVolumeGroup { return &lvgcrd.LogicalVolumeGroup{ TypeMeta: apisV1.TypeMeta{ Kind: crdV1.LVGKind, @@ -235,7 +236,7 @@ func (k *KubeClient) ConstructLVGCR(name string, apiLVG api.LogicalVolumeGroup) }, ObjectMeta: apisV1.ObjectMeta{ Name: name, - Labels: constructDefaultAppMap(), + Labels: constructLVGCRLabels(storageGroup), }, Spec: apiLVG, } @@ -409,6 +410,12 @@ func PrepareScheme() (*runtime.Scheme, error) { return nil, err } + // register csi storagegroup crd + err := sgcrd.AddToSchemeStorageGroup(scheme) + if err != nil { + return nil, err + } + return scheme, nil } @@ -420,3 +427,11 @@ func constructDefaultAppMap() (labels map[string]string) { } return } + +func constructLVGCRLabels(storageGroup string) (labels map[string]string) { + labels = constructDefaultAppMap() + if storageGroup != "" { + labels[crdV1.StorageGroupLabelKey] = storageGroup + } + return labels +} diff --git a/pkg/base/k8s/kubeclient_test.go b/pkg/base/k8s/kubeclient_test.go index 659d4bb07..9f5cad1fe 100644 --- a/pkg/base/k8s/kubeclient_test.go +++ b/pkg/base/k8s/kubeclient_test.go @@ -45,6 +45,7 @@ const ( testID = "someID" testNode1Name = "node1" testDriveLocation1 = "drive" + testStorageGroup = "test-group" ) var ( @@ -496,7 +497,7 @@ var _ = Describe("Constructor methods", func() { }) Context("ConstructLVGCR", func() { It("Should return right LogicalVolumeGroup CR", func() { - constructedCR := k8sclient.ConstructLVGCR(testLVGName, testApiLVG) + constructedCR := k8sclient.ConstructLVGCR(testLVGName, "", testApiLVG) Expect(constructedCR.TypeMeta.Kind).To(Equal(testLVGCR.TypeMeta.Kind)) Expect(constructedCR.TypeMeta.APIVersion).To(Equal(testLVGCR.TypeMeta.APIVersion)) Expect(constructedCR.ObjectMeta.Name).To(Equal(testLVGCR.ObjectMeta.Name)) @@ -505,6 +506,18 @@ var _ = Describe("Constructor methods", func() { Expect(constructedCR.Labels).To(Equal(constructDefaultAppMap())) }) }) + Context("ConstructLVGCR with storage group", func() { + It("Should return right LogicalVolumeGroup CR with storage group", func() { + constructedCR := k8sclient.ConstructLVGCR(testLVGName, testStorageGroup, testApiLVG) + Expect(constructedCR.TypeMeta.Kind).To(Equal(testLVGCR.TypeMeta.Kind)) + 
Expect(constructedCR.TypeMeta.APIVersion).To(Equal(testLVGCR.TypeMeta.APIVersion)) + Expect(constructedCR.ObjectMeta.Name).To(Equal(testLVGCR.ObjectMeta.Name)) + Expect(constructedCR.ObjectMeta.Namespace).To(Equal(testLVGCR.ObjectMeta.Namespace)) + Expect(constructedCR.Labels[apiV1.StorageGroupLabelKey]).To(Equal(testStorageGroup)) + Expect(constructedCR.Spec).To(Equal(testLVGCR.Spec)) + Expect(constructedCR.Labels).To(Equal(constructLVGCRLabels(testStorageGroup))) + }) + }) }) // remove all crds (volume and ac) diff --git a/pkg/common/ac_operations.go b/pkg/common/ac_operations.go index 747907349..792adfc07 100644 --- a/pkg/common/ac_operations.go +++ b/pkg/common/ac_operations.go @@ -33,7 +33,7 @@ import ( // AvailableCapacityOperations is the interface for interact with AvailableCapacity CRs from Controller type AvailableCapacityOperations interface { - RecreateACToLVGSC(ctx context.Context, sc string, acs ...accrd.AvailableCapacity) *accrd.AvailableCapacity + RecreateACToLVGSC(ctx context.Context, sc, sg string, acs ...accrd.AvailableCapacity) *accrd.AvailableCapacity } // ACOperationsImpl is the basic implementation of AvailableCapacityOperations interface @@ -56,7 +56,7 @@ func NewACOperationsImpl(k8sClient *k8s.KubeClient, l *logrus.Logger) *ACOperati // Concerts first AC to LVG SC and set size of remaining to 0 // Receives newSC as string (e.g. HDDLVG) and AvailableCapacities where LVG should be based // Returns created AC or nil -func (a *ACOperationsImpl) RecreateACToLVGSC(ctx context.Context, newSC string, +func (a *ACOperationsImpl) RecreateACToLVGSC(ctx context.Context, newSC, storageGroup string, acs ...accrd.AvailableCapacity) *accrd.AvailableCapacity { ll := a.log.WithFields(logrus.Fields{ "method": "RecreateACToLVGSC", @@ -90,7 +90,7 @@ func (a *ACOperationsImpl) RecreateACToLVGSC(ctx context.Context, newSC string, ) // create LVG CR based on ACs - lvg := a.k8sClient.ConstructLVGCR(name, apiLVG) + lvg := a.k8sClient.ConstructLVGCR(name, storageGroup, apiLVG) if err = a.k8sClient.CreateCR(ctx, name, lvg); err != nil { ll.Errorf("Unable to create LVG CR: %v", err) return nil diff --git a/pkg/common/volume_operations.go b/pkg/common/volume_operations.go index d0f22b497..8784485bd 100644 --- a/pkg/common/volume_operations.go +++ b/pkg/common/volume_operations.go @@ -187,7 +187,7 @@ func (vo *VolumeOperationsImpl) handleVolumeCreation(ctx context.Context, log *l if ac.Spec.StorageClass != v.StorageClass && util.IsStorageClassLVG(v.StorageClass) { // AC needs to be converted to LogicalVolumeGroup AC, LogicalVolumeGroup doesn't exist yet - if ac = vo.acProvider.RecreateACToLVGSC(ctx, v.StorageClass, *ac); ac == nil { + if ac = vo.acProvider.RecreateACToLVGSC(ctx, v.StorageClass, ac.Labels[apiV1.StorageGroupLabelKey], *ac); ac == nil { return nil, status.Errorf(codes.Internal, "unable to prepare underlying storage for storage class %s", v.StorageClass) } @@ -670,7 +670,7 @@ func (vo *VolumeOperationsImpl) fillCache() { } } -// VolumeOperationsImpl returns PVC labels: release, app.kubernetes.io/name and adds short app label +// VolumeOperationsImpl returns PVC labels: release, app.kubernetes.io/name, storagegroup and adds short app label func (vo *VolumeOperationsImpl) getPersistentVolumeClaimLabels(ctx context.Context, pvcName, pvcNamespace string) ( map[string]string, error) { ll := vo.log.WithFields(logrus.Fields{ @@ -686,7 +686,7 @@ func (vo *VolumeOperationsImpl) getPersistentVolumeClaimLabels(ctx context.Conte return nil, err } - // need to get release and app labels only + 
// need to get release, app and storagegroup labels labels := map[string]string{} if value, ok := pvc.GetLabels()[k8s.ReleaseLabelKey]; ok { labels[k8s.ReleaseLabelKey] = value @@ -695,6 +695,9 @@ func (vo *VolumeOperationsImpl) getPersistentVolumeClaimLabels(ctx context.Conte labels[k8s.AppLabelKey] = value labels[k8s.AppLabelShortKey] = value } + if value, ok := pvc.GetLabels()[apiV1.StorageGroupLabelKey]; ok { + labels[apiV1.StorageGroupLabelKey] = value + } return labels, nil } diff --git a/pkg/common/volume_operations_test.go b/pkg/common/volume_operations_test.go index 84f4ba20a..31f5b1f3b 100644 --- a/pkg/common/volume_operations_test.go +++ b/pkg/common/volume_operations_test.go @@ -74,9 +74,11 @@ func Test_getPersistentVolumeClaimLabels(t *testing.T) { var ( appName = "my-app" releaseName = "my-release" + sgName = "my-group" pvcLabels = map[string]string{ - k8s.AppLabelKey: appName, - k8s.ReleaseLabelKey: releaseName, + k8s.AppLabelKey: appName, + k8s.ReleaseLabelKey: releaseName, + apiV1.StorageGroupLabelKey: sgName, } pvc = &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: pvcName, Namespace: namespace, Labels: pvcLabels}} @@ -91,6 +93,7 @@ func Test_getPersistentVolumeClaimLabels(t *testing.T) { assert.Equal(t, labels[k8s.AppLabelKey], appName) assert.Equal(t, labels[k8s.AppLabelShortKey], appName) assert.Equal(t, labels[k8s.ReleaseLabelKey], releaseName) + assert.Equal(t, labels[apiV1.StorageGroupLabelKey], sgName) } func TestVolumeOperationsImpl_CreateVolume_VolumeExists(t *testing.T) { diff --git a/pkg/crcontrollers/reservation/reservationcontroller.go b/pkg/crcontrollers/reservation/reservationcontroller.go index bb4588720..0f7ff9ec1 100644 --- a/pkg/crcontrollers/reservation/reservationcontroller.go +++ b/pkg/crcontrollers/reservation/reservationcontroller.go @@ -112,7 +112,7 @@ func (c *Controller) handleReservationUpdate(ctx context.Context, log *logrus.En volumes := make([]*v1api.Volume, len(reservationSpec.ReservationRequests)) for i, request := range reservationSpec.ReservationRequests { capacity := request.CapacityRequest - volumes[i] = &v1api.Volume{Id: capacity.Name, Size: capacity.Size, StorageClass: capacity.StorageClass} + volumes[i] = &v1api.Volume{Id: capacity.Name, Size: capacity.Size, StorageClass: capacity.StorageClass, StorageGroup: capacity.StorageGroup} } // TODO: do not read all ACs and ACRs for each request: https://github.com/dell/csi-baremetal/issues/89 diff --git a/pkg/crcontrollers/storagegroup/storagegroupcontroller.go b/pkg/crcontrollers/storagegroup/storagegroupcontroller.go new file mode 100644 index 000000000..5492a80d8 --- /dev/null +++ b/pkg/crcontrollers/storagegroup/storagegroupcontroller.go @@ -0,0 +1,613 @@ +package storagegroup + +import ( + "context" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/sirupsen/logrus" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + api "github.com/dell/csi-baremetal/api/generated/v1" + apiV1 "github.com/dell/csi-baremetal/api/v1" + accrd "github.com/dell/csi-baremetal/api/v1/availablecapacitycrd" + "github.com/dell/csi-baremetal/api/v1/drivecrd" + sgcrd 
"github.com/dell/csi-baremetal/api/v1/storagegroupcrd" + errTypes "github.com/dell/csi-baremetal/pkg/base/error" + "github.com/dell/csi-baremetal/pkg/base/k8s" + "github.com/dell/csi-baremetal/pkg/base/util" +) + +const ( + sgFinalizer = "dell.emc.csi/sg-cleanup" + sgTempStatusAnnotationKey = "storagegroup.csi-baremetal.dell.com/status" + contextTimeoutSeconds = 60 +) + +// Controller to reconcile storagegroup custom resource +type Controller struct { + client *k8s.KubeClient + log *logrus.Entry + crHelper k8s.CRHelper + cachedCrHelper k8s.CRHelper +} + +// NewController creates new instance of Controller structure +// Receives an instance of base.KubeClient and logrus logger +// Returns an instance of Controller +func NewController(client *k8s.KubeClient, k8sCache k8s.CRReader, log *logrus.Logger) *Controller { + c := &Controller{ + client: client, + crHelper: k8s.NewCRHelperImpl(client, log), + cachedCrHelper: k8s.NewCRHelperImpl(client, log).SetReader(k8sCache), + log: log.WithField("component", "StorageGroupController"), + } + return c +} + +// SetupWithManager registers Controller to ControllerManager +func (c *Controller) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&sgcrd.StorageGroup{}). + WithOptions(controller.Options{}). + Watches(&source.Kind{Type: &drivecrd.Drive{}}, &handler.EnqueueRequestForObject{}). + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return c.filterUpdateEvent(e.ObjectOld, e.ObjectNew) + }, + }). + Complete(c) +} + +func (c *Controller) filterUpdateEvent(old runtime.Object, new runtime.Object) bool { + if newDrive, ok := new.(*drivecrd.Drive); ok { + if oldDrive, ok := old.(*drivecrd.Drive); ok { + return filterDriveUpdateEvent(oldDrive, newDrive) + } + } + return true +} + +func filterDriveUpdateEvent(old *drivecrd.Drive, new *drivecrd.Drive) bool { + return old.Labels[apiV1.StorageGroupLabelKey] != new.Labels[apiV1.StorageGroupLabelKey] +} + +// Reconcile reconciles StorageGroup custom resources +func (c *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + ctx, cancelFn := context.WithTimeout(ctx, contextTimeoutSeconds*time.Second) + defer cancelFn() + + // read name + name := req.Name + // customize logging + log := c.log.WithFields(logrus.Fields{"method": "Reconcile", "name": name}) + + drive := &drivecrd.Drive{} + if err := c.client.ReadCR(ctx, name, "", drive); err == nil { + return c.syncDriveStorageGroupLabel(ctx, drive) + } else if !k8serrors.IsNotFound(err) { + log.Errorf("error in reading %s as drive object: %v", name, err) + } + + storageGroup := &sgcrd.StorageGroup{} + if err := c.client.ReadCR(ctx, name, "", storageGroup); err != nil { + if !k8serrors.IsNotFound(err) { + log.Errorf("error in reading %s as drive or storagegroup object: %v", name, err) + } + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + log.Debugf("Reconcile StorageGroup: %v", storageGroup) + + // StorageGroup Deletion request + if !storageGroup.DeletionTimestamp.IsZero() { + return c.handleStorageGroupDeletion(ctx, log, storageGroup) + } + + if !util.ContainsString(storageGroup.Finalizers, sgFinalizer) { + // append finalizer + log.Debugf("Appending finalizer for StorageGroup") + storageGroup.Finalizers = append(storageGroup.Finalizers, sgFinalizer) + if err := c.client.UpdateCR(ctx, storageGroup); err != nil { + log.Errorf("Unable to append finalizer %s to StorageGroup with error: %v.", sgFinalizer, err) + return 
ctrl.Result{Requeue: true}, err + } + } + + if storageGroup.Annotations == nil { + storageGroup.Annotations = map[string]string{} + } + sgStatus, ok := storageGroup.Annotations[sgTempStatusAnnotationKey] + if !ok { + if !c.isStorageGroupValid(log, storageGroup) { + storageGroup.Annotations[sgTempStatusAnnotationKey] = apiV1.Failed + if err := c.client.UpdateCR(ctx, storageGroup); err != nil { + log.Errorf("Unable to update StorageGroup status with error: %v.", err) + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil + } + // Passed storage group validation, change to CREATING status + sgStatus = apiV1.Creating + storageGroup.Annotations[sgTempStatusAnnotationKey] = apiV1.Creating + if err := c.client.UpdateCR(ctx, storageGroup); err != nil { + log.Errorf("Unable to update StorageGroup status with error: %v.", err) + return ctrl.Result{Requeue: true}, err + } + } + + if sgStatus == apiV1.Creating { + return c.handleStorageGroupCreationOrUpdate(ctx, log, storageGroup) + } + + return ctrl.Result{}, nil +} + +// combine the following similar funcs +func (c *Controller) removeACStorageGroupLabel(ctx context.Context, log *logrus.Entry, ac *accrd.AvailableCapacity) error { + delete(ac.Labels, apiV1.StorageGroupLabelKey) + if err := c.client.UpdateCR(ctx, ac); err != nil { + log.Errorf("failed to remove storage-group label from ac %s with error %v", ac.Name, err) + return err + } + return nil +} + +func (c *Controller) removeDriveStorageGroupLabel(ctx context.Context, log *logrus.Entry, drive *drivecrd.Drive) error { + delete(drive.Labels, apiV1.StorageGroupLabelKey) + if err := c.client.UpdateCR(ctx, drive); err != nil { + log.Errorf("failed to remove storage-group label from drive %s with error %v", drive.Name, err) + return err + } + return nil +} + +func (c *Controller) addDriveStorageGroupLabel(ctx context.Context, log *logrus.Entry, drive *drivecrd.Drive, + sgName string) error { + if drive.Labels == nil { + drive.Labels = map[string]string{} + } + drive.Labels[apiV1.StorageGroupLabelKey] = sgName + if err := c.client.UpdateCR(ctx, drive); err != nil { + log.Errorf("failed to add storage group %s label to drive %s with error %v", sgName, drive.Name, err) + return err + } + return nil +} + +func (c *Controller) addACStorageGroupLabel(ctx context.Context, log *logrus.Entry, ac *accrd.AvailableCapacity, + sgName string) error { + if ac.Labels == nil { + ac.Labels = map[string]string{} + } + ac.Labels[apiV1.StorageGroupLabelKey] = sgName + if err := c.client.UpdateCR(ctx, ac); err != nil { + log.Errorf("failed to add storage group %s label to ac %s with error %v", sgName, ac.Name, err) + return err + } + return nil +} + +func (c *Controller) syncDriveOnAllStorageGroups(ctx context.Context, drive *drivecrd.Drive, ac *accrd.AvailableCapacity) (ctrl.Result, error) { + log := c.log.WithFields(logrus.Fields{"method": "syncDriveOnAllStorageGroups", "name": drive.Name}) + + sgList := &sgcrd.StorageGroupList{} + if err := c.client.ReadList(ctx, sgList); err != nil { + log.Errorf("failed to read storage group list: %v", err) + return ctrl.Result{Requeue: true}, err + } + for _, storageGroup := range sgList.Items { + sg := storageGroup + sgStatus, ok := sg.Annotations[sgTempStatusAnnotationKey] + if !ok { + if !c.isStorageGroupValid(log, &sg) { + sg.Annotations[sgTempStatusAnnotationKey] = apiV1.Failed + if err := c.client.UpdateCR(ctx, &sg); err != nil { + log.Errorf("Unable to update StorageGroup status with error: %v", err) + return ctrl.Result{Requeue: true}, err + } + continue + } +
// Passed storage group validation, change to CREATING status + sgStatus = apiV1.Creating + sg.Annotations[sgTempStatusAnnotationKey] = apiV1.Creating + if err := c.client.UpdateCR(ctx, &sg); err != nil { + log.Errorf("Unable to update StorageGroup status with error: %v", err) + return ctrl.Result{Requeue: true}, err + } + } + + if sgStatus != apiV1.Failed && c.isDriveSelectedByValidMatchFields(log, &drive.Spec, &sg.Spec.DriveSelector.MatchFields) { + if sg.Spec.DriveSelector.NumberDrivesPerNode == 0 { + log.Infof("Expect to add label of storagegroup %s to drive %s", sg.Name, drive.Name) + if err := c.addDriveStorageGroupLabel(ctx, log, drive, sg.Name); err != nil { + return ctrl.Result{Requeue: true}, err + } + if err := c.addACStorageGroupLabel(ctx, log, ac, sg.Name); err != nil { + return ctrl.Result{Requeue: true}, err + } + log.Infof("Successfully added label of storagegroup %s to drive %s and its corresponding AC", + sg.Name, drive.Name) + return ctrl.Result{}, nil + } + + log.Debugf("drive %s will probably be selected by storagegroup %s", drive.Name, sg.Name) + if sg.Annotations[sgTempStatusAnnotationKey] != apiV1.Creating { + // trigger the subsequent reconciliation of the potentially-matched storage group + sg.Annotations[sgTempStatusAnnotationKey] = apiV1.Creating + if err := c.client.UpdateCR(ctx, &sg); err != nil { + log.Errorf("Unable to update StorageGroup status with error: %v", err) + return ctrl.Result{Requeue: true}, err + } + } + } + } + return ctrl.Result{}, nil +} + +func (c *Controller) handleManualDriveStorageGroupLabelAddition(ctx context.Context, log *logrus.Entry, + drive *drivecrd.Drive, ac *accrd.AvailableCapacity, driveSGLabel string, lvgExists bool) (ctrl.Result, error) { + if lvgExists { + log.Warnf("We can't add storage group label to drive %s with existing LVG", drive.Name) + if err := c.removeDriveStorageGroupLabel(ctx, log, drive); err != nil { + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil + } + + volumes, err := c.crHelper.GetVolumesByLocation(ctx, drive.Spec.UUID) + if err != nil { + log.Errorf("Error when getting volumes on drive %s: %v", drive.Name, err) + return ctrl.Result{Requeue: true}, err + } + if len(volumes) > 0 { + log.Warnf("We can't add storage group label to drive %s with existing volumes", drive.Name) + if err = c.removeDriveStorageGroupLabel(ctx, log, drive); err != nil { + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil + } + + log.Debugf("Also add storage-group %s label to AC %s corresponding to drive %s", driveSGLabel, ac.Name, drive.Name) + if err = c.addACStorageGroupLabel(ctx, log, ac, driveSGLabel); err != nil { + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil +} + +// Here, we will sync the storage-group label of the drive object if applicable +func (c *Controller) syncDriveStorageGroupLabel(ctx context.Context, drive *drivecrd.Drive) (ctrl.Result, error) { + log := c.log.WithFields(logrus.Fields{"method": "syncDriveStorageGroupLabel", "name": drive.Name}) + + location := drive.Name + lvg, err := c.crHelper.GetLVGByDrive(ctx, drive.Spec.UUID) + if err != nil { + log.Errorf("Error when getting LVG by drive %s: %v", drive.Name, err) + return ctrl.Result{Requeue: true}, err + } + if lvg != nil { + location = lvg.Name + } + + ac, err := c.cachedCrHelper.GetACByLocation(location) + if err != nil { + if err != errTypes.ErrorNotFound { + log.Errorf("Error when getting AC by location %s: %v", location, err) + } + return ctrl.Result{Requeue: true}, err + } + + acSGLabel,
acSGLabeled := ac.Labels[apiV1.StorageGroupLabelKey] + driveSGLabel, driveSGLabeled := drive.Labels[apiV1.StorageGroupLabelKey] + if acSGLabel == driveSGLabel { + if !acSGLabeled && !driveSGLabeled && lvg == nil { + volumes, err := c.crHelper.GetVolumesByLocation(ctx, drive.Spec.UUID) + if err != nil { + log.Errorf("Error when getting volumes on drive %s: %v", drive.Name, err) + return ctrl.Result{Requeue: true}, err + } + if len(volumes) == 0 { + return c.syncDriveOnAllStorageGroups(ctx, drive, ac) + } + } + return ctrl.Result{}, nil + } + + // Current manual sg labeling support + log.Debugf("Handle manual change of storage group label of drive %s", drive.Name) + + switch { + // add new storagegroup label to drive + case !acSGLabeled && driveSGLabeled: + return c.handleManualDriveStorageGroupLabelAddition(ctx, log, drive, ac, driveSGLabel, lvg != nil) + + // remove storagegroup label from drive + case acSGLabeled && !driveSGLabeled: + volumes, err := c.crHelper.GetVolumesByLocation(ctx, drive.Spec.UUID) + if err != nil { + log.Errorf("Error when getting volumes on drive %s: %v", drive.Name, err) + return ctrl.Result{Requeue: true}, err + } + if len(volumes) > 0 { + log.Warnf("We can't remove storage group %s label from drive %s with existing volumes", + acSGLabel, drive.Name) + if err := c.addDriveStorageGroupLabel(ctx, log, drive, acSGLabel); err != nil { + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil + } + + sg := &sgcrd.StorageGroup{} + err = c.client.ReadCR(ctx, acSGLabel, "", sg) + switch { + case err == nil && c.isDriveSelectedByValidMatchFields(log, &drive.Spec, &sg.Spec.DriveSelector.MatchFields): + log.Warnf("We can't remove storage group %s label from drive %s still selected by this storage group", + acSGLabel, drive.Name) + if err := c.addDriveStorageGroupLabel(ctx, log, drive, acSGLabel); err != nil { + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil + case err != nil && !k8serrors.IsNotFound(err): + log.Errorf("Failed to read StorageGroup %s with error: %v", acSGLabel, err) + return ctrl.Result{Requeue: true}, err + + // the case that the storage-group label removal is valid and we should sync the removal to AC + default: + log.Debugf("Also remove the storage-group %s label of AC %s corresponding to drive %s", acSGLabel, + ac.Name, drive.Name) + if err := c.removeACStorageGroupLabel(ctx, log, ac); err != nil { + return ctrl.Result{Requeue: true}, err + } + } + + // TODO restore the update of storagegroup label of drive + } + + return ctrl.Result{}, nil +} + +func (c *Controller) handleStorageGroupDeletion(ctx context.Context, log *logrus.Entry, + sg *sgcrd.StorageGroup) (ctrl.Result, error) { + drivesList := &drivecrd.DriveList{} + if err := c.client.ReadList(ctx, drivesList); err != nil { + log.Errorf("failed to read drives list: %v", err) + return ctrl.Result{Requeue: true}, err + } + + var labelRemovalErrMsgs []string + for _, drive := range drivesList.Items { + drive := drive + if drive.Labels[apiV1.StorageGroupLabelKey] == sg.Name { + if err := c.removeDriveAndACStorageGroupLabel(ctx, log, &drive, sg); err != nil { + labelRemovalErrMsgs = append(labelRemovalErrMsgs, err.Error()) + } + } + } + if len(labelRemovalErrMsgs) > 0 { + return ctrl.Result{Requeue: true}, fmt.Errorf(strings.Join(labelRemovalErrMsgs, "\n")) + } + return c.removeFinalizer(ctx, log, sg) +} + +func (c *Controller) removeFinalizer(ctx context.Context, log *logrus.Entry, + sg *sgcrd.StorageGroup) (ctrl.Result, error) { + if 
+
+func (c *Controller) removeFinalizer(ctx context.Context, log *logrus.Entry,
+	sg *sgcrd.StorageGroup) (ctrl.Result, error) {
+	if util.ContainsString(sg.Finalizers, sgFinalizer) {
+		sg.Finalizers = util.RemoveString(sg.Finalizers, sgFinalizer)
+		if err := c.client.UpdateCR(ctx, sg); err != nil {
+			log.Errorf("Unable to remove finalizer %s from StorageGroup with error: %v", sgFinalizer, err)
+			return ctrl.Result{Requeue: true}, err
+		}
+	}
+	return ctrl.Result{}, nil
+}
+
+func (c *Controller) handleStorageGroupCreationOrUpdate(ctx context.Context, log *logrus.Entry,
+	sg *sgcrd.StorageGroup) (ctrl.Result, error) {
+	drivesList := &drivecrd.DriveList{}
+	if err := c.client.ReadList(ctx, drivesList); err != nil {
+		log.Errorf("Failed to read drives list: %v", err)
+		return ctrl.Result{Requeue: true}, err
+	}
+	noDriveSelected := true
+	drivesCount := map[string]int32{}
+	driveSelector := sg.Spec.DriveSelector
+
+	var labelingErrMsgs []string
+
+	// candidate drives to be selected by a storagegroup with numberDrivesPerNode > 0
+	var candidateDrives []*drivecrd.Drive
+	for _, d := range drivesList.Items {
+		drive := d
+		existingStorageGroup, exists := drive.Labels[apiV1.StorageGroupLabelKey]
+		if exists {
+			if existingStorageGroup == sg.Name {
+				log.Debugf("Drive %s has already been selected by the current storage group", drive.Name)
+				noDriveSelected = false
+				if driveSelector.NumberDrivesPerNode > 0 {
+					drivesCount[drive.Spec.NodeId]++
+				}
+			}
+			continue
+		}
+
+		if c.isDriveSelectedByValidMatchFields(log, &drive.Spec, &driveSelector.MatchFields) &&
+			(driveSelector.NumberDrivesPerNode == 0 || drivesCount[drive.Spec.NodeId] < driveSelector.NumberDrivesPerNode) {
+			if driveSelector.NumberDrivesPerNode > 0 {
+				candidateDrives = append(candidateDrives, &drive)
+				continue
+			}
+
+			if err := c.addDriveAndACStorageGroupLabel(ctx, log, &drive, sg); err != nil {
+				labelingErrMsgs = append(labelingErrMsgs, err.Error())
+			}
+			noDriveSelected = false
+		}
+	}
+
+	for _, d := range candidateDrives {
+		drive := d
+		if drivesCount[drive.Spec.NodeId] < driveSelector.NumberDrivesPerNode {
+			if err := c.addDriveAndACStorageGroupLabel(ctx, log, drive, sg); err != nil {
+				labelingErrMsgs = append(labelingErrMsgs, err.Error())
+			}
+			noDriveSelected = false
+			drivesCount[drive.Spec.NodeId]++
+		}
+	}
+
+	if noDriveSelected {
+		log.Warnf("No drive can be selected by the current storage group %s", sg.Name)
+	}
+	if len(labelingErrMsgs) == 0 {
+		sg.Annotations[sgTempStatusAnnotationKey] = apiV1.Created
+		if err := c.client.UpdateCR(ctx, sg); err != nil {
+			log.Errorf("Unable to update StorageGroup status with error: %v", err)
+			return ctrl.Result{Requeue: true}, err
+		}
+		return ctrl.Result{}, nil
+	}
+	// use an explicit format string so any '%' in the joined messages is not misinterpreted
+	return ctrl.Result{Requeue: true}, fmt.Errorf("%s", strings.Join(labelingErrMsgs, "\n"))
+}
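+
+// Illustrative StorageGroup manifest (a sketch; apiVersion and field casing
+// are assumed from the repo's CRD conventions, not verified against the
+// generated schema):
+//
+//	apiVersion: csi-baremetal.dell.com/v1
+//	kind: StorageGroup
+//	metadata:
+//	  name: hdd-group-1
+//	spec:
+//	  driveSelector:
+//	    numberDrivesPerNode: 1   # select at most one matching drive per node
+//	    matchFields:             # ANDed; keys are api.Drive field names
+//	      Type: HDD
+//	      IsSystem: "false"
+//
+// With numberDrivesPerNode > 0, matching unlabeled drives are first collected
+// as candidates and then labeled per node up to the requested count, as
+// implemented in handleStorageGroupCreationOrUpdate above.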
+
+func (c *Controller) isDriveSelectedByValidMatchFields(log *logrus.Entry, drive *api.Drive, matchFields *map[string]string) bool {
+	for fieldName, fieldValue := range *matchFields {
+		driveField := reflect.ValueOf(drive).Elem().FieldByName(fieldName)
+		switch driveField.Type().String() {
+		case "string":
+			if driveField.String() != fieldValue {
+				return false
+			}
+		case "int64":
+			fieldValueInt64, _ := strconv.ParseInt(fieldValue, 10, 64)
+			if driveField.Int() != fieldValueInt64 {
+				return false
+			}
+		case "bool":
+			fieldValueBool, _ := strconv.ParseBool(fieldValue)
+			if driveField.Bool() != fieldValueBool {
+				return false
+			}
+		default:
+			// unexpected type of a field which may be added to the drive CR in the future
+			log.Warnf("unexpected field type %s for field %s with value %s in matchFields",
+				driveField.Type().String(), fieldName, fieldValue)
+			return false
+		}
+	}
+	return true
+}
+
+func (c *Controller) isMatchFieldsValid(log *logrus.Entry, matchFields *map[string]string) bool {
+	for fieldName, fieldValue := range *matchFields {
+		driveField := reflect.ValueOf(&api.Drive{}).Elem().FieldByName(fieldName)
+		if !driveField.IsValid() {
+			log.Warnf("Invalid field %s in driveSelector.matchFields", fieldName)
+			return false
+		}
+		switch driveField.Type().String() {
+		case "string":
+			// any string value is valid
+		case "int64":
+			if _, err := strconv.ParseInt(fieldValue, 10, 64); err != nil {
+				log.Warnf("Invalid field value %s for field %s. Parsing error: %v", fieldValue, fieldName, err)
+				return false
+			}
+		case "bool":
+			if _, err := strconv.ParseBool(fieldValue); err != nil {
+				log.Warnf("Invalid field value %s for field %s. Parsing error: %v", fieldValue, fieldName, err)
+				return false
+			}
+		default:
+			// unexpected type of a field which may be added to the drive CR in the future
+			log.Warnf("unexpected field type %s for field %s with value %s in matchFields",
+				driveField.Type().String(), fieldName, fieldValue)
+			return false
+		}
+	}
+	return true
+}
+
+// TODO: more checks are needed on whether the storagegroup is valid
+func (c *Controller) isStorageGroupValid(log *logrus.Entry, sg *sgcrd.StorageGroup) bool {
+	return c.isMatchFieldsValid(log, &sg.Spec.DriveSelector.MatchFields)
+}
+
+func (c *Controller) removeDriveAndACStorageGroupLabel(ctx context.Context, log *logrus.Entry, drive *drivecrd.Drive,
+	sg *sgcrd.StorageGroup) error {
+	log.Debugf("Try to remove the storagegroup label of drive %s", drive.Name)
+	volumes, err := c.crHelper.GetVolumesByLocation(ctx, drive.Spec.UUID)
+	if err != nil {
+		return err
+	}
+	if len(volumes) > 0 {
+		log.Errorf("Drive %s has existing volumes. Storage group label can't be removed.", drive.Name)
+		return fmt.Errorf("can't remove storage-group label from drive %s with existing volumes", drive.Name)
+	}
+
+	ac, err := c.cachedCrHelper.GetACByLocation(drive.Spec.UUID)
+	if err != nil {
+		log.Errorf("Error when getting AC by drive %s: %v", drive.Spec.UUID, err)
+		return err
+	}
+	if err = c.removeDriveStorageGroupLabel(ctx, log, drive); err != nil {
+		return err
+	}
+	if ac.Labels[apiV1.StorageGroupLabelKey] == sg.Name {
+		if err = c.removeACStorageGroupLabel(ctx, log, ac); err != nil {
+			return err
+		}
+	} else {
+		log.Warnf("We can't remove the storage-group label of AC %s that is not included in storage group %s", ac.Name, sg.Name)
+	}
+	return nil
+}
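+
+// Note on the shared safety contract of the label helpers above and below:
+// a label is never added to a drive that already hosts an LVG or volumes,
+// never removed from a drive that still hosts volumes, the Drive CR is always
+// updated before its corresponding AC, and an AC label is only removed when it
+// actually belongs to the storage group being reconciled.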
+
+func (c *Controller) addDriveAndACStorageGroupLabel(ctx context.Context, log *logrus.Entry, drive *drivecrd.Drive,
+	sg *sgcrd.StorageGroup) error {
+	log.Infof("Expect to add label of storagegroup %s to drive %s", sg.Name, drive.Name)
+
+	if lvg, err := c.crHelper.GetLVGByDrive(ctx, drive.Spec.UUID); err != nil || lvg != nil {
+		if err != nil {
+			log.Errorf("Error when getting LVG by drive %s: %v", drive.Name, err)
+			return err
+		}
+		log.Warnf("Drive %s has an existing LVG and can't be selected by the current storage group.",
+			drive.Name)
+		return nil
+	}
+
+	if volumes, err := c.crHelper.GetVolumesByLocation(ctx, drive.Spec.UUID); err != nil || len(volumes) > 0 {
+		if err != nil {
+			log.Errorf("Error when getting volumes on drive %s: %v", drive.Name, err)
+			return err
+		}
+		log.Warnf("Drive %s has existing volumes and can't be selected by the current storage group.",
+			drive.Name)
+		return nil
+	}
+
+	ac, err := c.cachedCrHelper.GetACByLocation(drive.Spec.UUID)
+	if err != nil {
+		log.Errorf("Error when getting AC by drive %s: %v", drive.Spec.UUID, err)
+		return err
+	}
+	// the corresponding AC exists; add the storage-group label to the drive and the corresponding AC
+	if err = c.addDriveStorageGroupLabel(ctx, log, drive, sg.Name); err != nil {
+		return err
+	}
+	if err = c.addACStorageGroupLabel(ctx, log, ac, sg.Name); err != nil {
+		return err
+	}
+	log.Infof("Successfully added label of storagegroup %s to drive %s and its corresponding AC", sg.Name, drive.Name)
+	return nil
+}
diff --git a/pkg/node/volumemgr.go b/pkg/node/volumemgr.go
index f0e878dc3..7a6606086 100644
--- a/pkg/node/volumemgr.go
+++ b/pkg/node/volumemgr.go
@@ -913,7 +913,7 @@ func (m *VolumeManager) discoverLVGOnSystemDrive() error {
 			VolumeRefs: lvs,
 			Health:     apiV1.HealthGood,
 		}
-		vgCR = m.k8sClient.ConstructLVGCR(vgCRName, vg)
+		vgCR = m.k8sClient.ConstructLVGCR(vgCRName, "", vg)
 		ctx  = context.WithValue(context.Background(), base.RequestUUID, vg.Name)
 	)
 	m.updateLVGAnnotation(vgCR, vgFreeSpace)
diff --git a/pkg/node/volumemgr_test.go b/pkg/node/volumemgr_test.go
index f0101fae1..b6a40fb9d 100644
--- a/pkg/node/volumemgr_test.go
+++ b/pkg/node/volumemgr_test.go
@@ -958,7 +958,7 @@ func TestVolumeManager_handleDriveStatusChange(t *testing.T) {
 func Test_discoverLVGOnSystemDrive_LVGAlreadyExists(t *testing.T) {
 	var (
 		m     = prepareSuccessVolumeManager(t)
-		lvgCR = m.k8sClient.ConstructLVGCR("some-name", api.LogicalVolumeGroup{
+		lvgCR = m.k8sClient.ConstructLVGCR("some-name", "", api.LogicalVolumeGroup{
 			Name:      "some-name",
 			Node:      m.nodeID,
 			Locations: []string{"some-uuid"},
diff --git a/pkg/scheduler/extender/extender.go b/pkg/scheduler/extender/extender.go
index aaf112f59..11acf9cc6 100644
--- a/pkg/scheduler/extender/extender.go
+++ b/pkg/scheduler/extender/extender.go
@@ -280,6 +280,7 @@ func (e *Extender) gatherCapacityRequestsByProvisioner(ctx context.Context, pod
 				requests = append(requests, createRequestFromPVCSpec(
 					generateEphemeralVolumeName(pod.GetName(), v.Name),
 					storageType,
+					v.Ephemeral.VolumeClaimTemplate.Labels[v1.StorageGroupLabelKey],
 					claimSpec.Resources,
 					ll,
 				))
@@ -330,6 +331,7 @@ func (e *Extender) gatherCapacityRequestsByProvisioner(ctx context.Context, pod
 				requests = append(requests, createRequestFromPVCSpec(
 					pvc.Name,
 					storageType,
+					pvc.Labels[v1.StorageGroupLabelKey],
 					pvc.Spec.Resources,
 					ll,
 				))
@@ -645,7 +647,7 @@ func generateEphemeralVolumeName(podName, volumeName string) string {
 	return podName + "-" + volumeName
 }
 
-func createRequestFromPVCSpec(volumeName, storageType string, resourceRequirements coreV1.ResourceRequirements, log *logrus.Entry) *genV1.CapacityRequest {
+func createRequestFromPVCSpec(volumeName, storageType, storageGroup string, resourceRequirements coreV1.ResourceRequirements, log *logrus.Entry) *genV1.CapacityRequest {
 	storageReq, ok := resourceRequirements.Requests[coreV1.ResourceStorage]
 	if !ok {
 		log.Errorf("There is no key for storage resource for PVC %s", volumeName)
@@ -655,6 +657,7 @@ func createRequestFromPVCSpec(volumeName, storageType, storageGroup string, reso
 		Name:         volumeName,
 		StorageClass: util.ConvertStorageClass(storageType),
 		Size:         storageReq.Value(),
+		StorageGroup: storageGroup,
 	}
 }