diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 00000000..a133e0db
Binary files /dev/null and b/.DS_Store differ
diff --git a/.dockerignore b/.dockerignore
index a9863ed3..6b66114e 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -6,4 +6,5 @@
 .git
 bin
 testbin
-vendor
\ No newline at end of file
+vendor
+test
diff --git a/Dockerfile b/Dockerfile
index c8a0632c..7c90ecef 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -24,7 +24,7 @@ COPY utils/ utils/
 COPY mysqluser/ mysqluser/
 # Build
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -oPg manager cmd/manager/main.go
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager cmd/manager/main.go
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
diff --git a/Dockerfile.sidecar b/Dockerfile.sidecar
index 4eac67be..379c386c 100644
--- a/Dockerfile.sidecar
+++ b/Dockerfile.sidecar
@@ -23,7 +23,11 @@ COPY sidecar/ sidecar/
 COPY utils/ utils/
 # Build
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o bin/sidecar cmd/sidecar/main.go
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -a -o bin/sidecar cmd/sidecar/main.go
+
+# Build mysql checker for mysql container
+COPY cmd/mysql/main.go cmd/mysql/main.go
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -a -o bin/mysqlchecker cmd/mysql/main.go
 ###############################################################################
 # Docker image for Sidecar
@@ -52,4 +56,5 @@ RUN set -ex; \
 WORKDIR /
 COPY --from=builder /workspace/bin/sidecar /usr/local/bin/sidecar
+COPY --from=builder /workspace/bin/mysqlchecker /mnt/mysqlchecker
 ENTRYPOINT ["sidecar"]
diff --git a/Makefile b/Makefile
index 28be451c..60cdf45c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,17 +1,14 @@
 # Image URL to use all building/pushing image targets
-FROM_VERSION ?=v2.2.1
-CHART_VERSION ?=2.2.1
-CHART_TOVERSION ?=2.3.0
-TO_VERSION ?=v2.3.0 
+FROM_VERSION ?=v2.3.0
+CHART_VERSION ?=2.3.0
+CHART_TOVERSION ?=3.0.0
+TO_VERSION ?=v3.0.0
+TAG ?=v3.0.0
 IMGPREFIX ?=radondb/
-MYSQL_IMAGE_57 ?=5.7.39
-MYSQL_IMAGE_80 ?=8.0.26
-MYSQL_IMAGE_57_TAG ?=$(IMGPREFIX)percona-server:$(MYSQL_IMAGE_57)
-MYSQL_IMAGE_80_TAG ?=$(IMGPREFIX)percona-server:$(MYSQL_IMAGE_80)
-IMG ?= $(IMGPREFIX)mysql-operator:latest
-SIDECAR57_IMG ?= $(IMGPREFIX)mysql57-sidecar:latest
-SIDECAR80_IMG ?= $(IMGPREFIX)mysql80-sidecar:latest
-XENON_IMG ?= $(IMGPREFIX)xenon:latest
+IMG ?= $(IMGPREFIX)mysql-operator:$(TAG)
+SIDECAR57_IMG ?= $(IMGPREFIX)mysql57-sidecar:$(TAG)
+SIDECAR80_IMG ?= $(IMGPREFIX)mysql80-sidecar:$(TAG)
+XENON_IMG ?= $(IMGPREFIX)xenon:$(TAG)
 GO_PORXY ?= off
 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
 CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
@@ -56,7 +53,7 @@ update-crd: ## Synchronize the generated YAML files to operator Chart after make
	make manifests
	cp config/crd/bases/* charts/mysql-operator/crds/
-generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
+generate: controller-gen generate-go-conversions ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
 fmt: ## Run go fmt against code.
@@ -64,7 +61,16 @@ fmt: ## Run go fmt against code.
 vet: ## Run go vet against code.
	go vet ./...
-
+
+CONVERSION_GEN := $(shell pwd)/bin/conversion-gen
+CODE_GENERATOR_VERSION := $(shell awk '/k8s.io\/client-go/ {print substr($$2, 2)}' go.mod)
+conversion-gen: ## Download conversion-gen locally if necessary. 
+ $(call go-get-tool,$(CONVERSION_GEN),k8s.io/code-generator/cmd/conversion-gen@v$(CODE_GENERATOR_VERSION)) +generate-go-conversions: conversion-gen $(CONVERSION_GEN) ## Generate conversions go code + $(CONVERSION_GEN) \ + --input-dirs=./api/v1beta1 \ + --output-file-base=zz_generated.conversion --output-base=. \ + --go-header-file=./hack/boilerplate.go.txt ENVTEST_ASSETS_DIR=$(shell pwd)/testbin test: manifests generate fmt vet ## Run tests. mkdir -p ${ENVTEST_ASSETS_DIR} @@ -81,15 +87,11 @@ build: generate fmt vet ## Build manager binary. run: manifests generate fmt vet ## Run a controller from your host. go run ./cmd/manager/main.go -docker-build: test ## Build docker image with the manager. +docker-build: ## Build docker image with the manager. docker buildx build --build-arg GO_PROXY=${GO_PORXY} -t ${IMG} . docker buildx build -f Dockerfile.sidecar --build-arg GO_PROXY=${GO_PORXY} -t ${SIDECAR57_IMG} . docker buildx build -f build/xenon/Dockerfile --build-arg GO_PROXY=${GO_PORXY} -t ${XENON_IMG} . docker buildx build --build-arg XTRABACKUP_PKG=percona-xtrabackup-80 --build-arg GO_PROXY=${GO_PORXY} -f Dockerfile.sidecar -t ${SIDECAR80_IMG} . - docker buildx build --build-arg "MYSQL_IMAGE=${MYSQL_IMAGE_57}" --build-arg GO_PROXY=${GO_PORXY} -f build/mysql/Dockerfile -t ${MYSQL_IMAGE_57_TAG} . - docker buildx build --build-arg "MYSQL_IMAGE=${MYSQL_IMAGE_80}" --build-arg GO_PROXY=${GO_PORXY} -f build/mysql/Dockerfile -t ${MYSQL_IMAGE_80_TAG} . -docker-build-mysql57: test ## Build docker image with the manager. - docker buildx build --build-arg "MYSQL_IMAGE=${MYSQL_IMAGE_57}" --build-arg GO_PROXY=${GO_PORXY} -f build/mysql/Dockerfile -t ${MYSQL_IMAGE_57_TAG} . docker-push: ## Push docker image with the manager. 
docker push ${IMG} docker push ${SIDECAR_IMG} @@ -141,13 +143,20 @@ todo: @grep -Irnw './' -e 'TODO:'|grep -v grep updateVersion: - find ./ -type f -name "*.go" -o -name "*.yaml" -exec sed -i "s/mysql57-sidecar:$(FROM_VERSION)/mysql57-sidecar:$(TO_VERSION)/g" {} \; - find ./ -type f -name "*.go" -o -name "*.yaml" -exec sed -i "s/xenon:$(FROM_VERSION)/xenon:$(TO_VERSION)/g" {} \; - find ./ -type f -name "*.go" -o -name "*.yaml" -exec sed -i "s/mysql-operator:$(FROM_VERSION)/mysql-operator:$(TO_VERSION)/g" {} \; - find ./ -type f -name "*.go" -o -name "*.yaml" -exec sed -i "s/mysql80-sidecar:$(FROM_VERSION)/mysql80-sidecar:$(TO_VERSION)/g" {} \; - find ./ -type f -name "*.go" -o -name "*.yaml" -exec sed -i "s/mysql-operator-$(FROM_VERSION)/mysql-operator-$(TO_VERSION)/g" {} \; - find ./ -type f -name "*.go" -o -name "*.yaml" -exec sed -i "s/\"$(FROM_VERSION)\"/\"$(TO_VERSION)\"/g" {} \; + find ./* -type f -name "*.go" -o -name "*.yaml" -exec sed -i "" "s/mysql57-sidecar:$(FROM_VERSION)/mysql57-sidecar:$(TO_VERSION)/g" {} \; + find ./* -type f -name "*.go" -o -name "*.yaml" -exec sed -i "" "s/xenon:$(FROM_VERSION)/xenon:$(TO_VERSION)/g" {} \; + find ./* -type f -name "*.go" -o -name "*.yaml" -exec sed -i "" "s/mysql-operator:$(FROM_VERSION)/mysql-operator:$(TO_VERSION)/g" {} \; + find ./* -type f -name "*.go" -o -name "*.yaml" -exec sed -i "" "s/mysql80-sidecar:$(FROM_VERSION)/mysql80-sidecar:$(TO_VERSION)/g" {} \; + find ./* -type f -name "*.go" -o -name "*.yaml" -exec sed -i "" "s/mysql-operator-$(FROM_VERSION)/mysql-operator-$(TO_VERSION)/g" {} \; + find ./* -type f -name "*.go" -o -name "*.yaml" -exec sed -i "" "s/\"$(FROM_VERSION)\"/\"$(TO_VERSION)\"/g" {} \; # sed -i "18s/$(CHART_VERSION)/$(CHART_TOVERSION)/" charts/mysql-operator/charts/Chart.yaml - find ./charts -type f -name "*.yaml" -exec sed -i "s/$(CHART_VERSION)/$(CHART_TOVERSION)/g" {} \; - find ./config -type f -name "*.yaml" -exec sed -i "s/$(CHART_VERSION)/$(CHART_TOVERSION)/g" {} \; + find ./charts/* 
-type f -name "*.yaml" -exec sed -i "" "s/$(CHART_VERSION)/$(CHART_TOVERSION)/g" {} \; + find ./config/* -type f -name "*.yaml" -exec sed -i "" "s/$(CHART_VERSION)/$(CHART_TOVERSION)/g" {} \; +CRD_TO_MARKDOWN := $(shell pwd)/bin/crd-to-markdown +CRD_TO_MARKDOWN_VERSION = 0.0.3 +crd-to-markdown: ## Download crd-to-markdown locally if necessary. + $(call go-get-tool,$(CRD_TO_MARKDOWN),github.com/clamoriniere/crd-to-markdown@v$(CRD_TO_MARKDOWN_VERSION)) +apidoc: crd-to-markdown $(wildcard api/*/*_types.go) + $(CRD_TO_MARKDOWN) --links docs/links.csv -f api/v1beta1/mysqlcluster_types.go -n MySQLCluster > docs/crd_mysqlcluster_v1beta1.md + diff --git a/api/v1alpha1/doc.go b/api/v1alpha1/doc.go new file mode 100644 index 00000000..93e522d2 --- /dev/null +++ b/api/v1alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 diff --git a/api/v1alpha1/mysqlcluster_conversion.go b/api/v1alpha1/mysqlcluster_conversion.go index fd7ebc9b..ba8d2350 100644 --- a/api/v1alpha1/mysqlcluster_conversion.go +++ b/api/v1alpha1/mysqlcluster_conversion.go @@ -1,3 +1,20 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1alpha1 func (*MysqlCluster) Hub() {} +func (*Backup) Hub() {} diff --git a/api/v1beta1/backup_types.go b/api/v1beta1/backup_types.go index 5cd0fc39..444febbe 100644 --- a/api/v1beta1/backup_types.go +++ b/api/v1beta1/backup_types.go @@ -28,46 +28,144 @@ import ( type BackupSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file + // ClusterName is the name of the cluster to be backed up. + ClusterName string `json:"clusterName,omitempty"` + // BackupMethod represents the type of backup + BackupMethod string `json:"method,omitempty"` + // Defines details for manual backup Jobs + // +optional + Manual *ManualBackup `json:"manual,omitempty"` + // Backup Schedule + // +optional + BackupSchedule *BackupSchedule `json:"schedule,omitempty"` + // Backup Storage + BackupOpts BackupOps `json:"backupops,omitempty"` +} - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "make" to regenerate code after modifying this file +type BackupOps struct { + // BackupHost + // +optional + BackupHost string `json:"host,omitempty"` + S3 *S3 `json:"s3,omitempty"` + NFS *NFS `json:"nfs,omitempty"` +} - // To specify the image that will be used for sidecar container. 
+type S3 struct { + // S3 Bucket // +optional - // +kubebuilder:default:="radondb/mysql57-sidecar:v2.3.0" - Image string `json:"image"` + BackupSecretName string `json:"secretName,omitempty"` +} - // HostFrom represents the host for which to take backup - // If is empty, is use leader HostFrom - HostFrom string `json:"hostfrom,omitempty"` +type NFS struct { + // Defines a Volume for backup MySQL data. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + Volume corev1.NFSVolumeSource `json:"volume,omitempty"` +} - // Represents the ip address of the nfs server. +type ManualBackup struct { + BackupType string `json:"type,omitempty"` + // Backup Retention // +optional - NFSServerAddress string `json:"nfsServerAddress,omitempty"` - - // ClusterName represents the cluster name to backup - ClusterName string `json:"clusterName"` + // +kubebuilder:default:=7 + BackupRetention *int32 `json:"backupRetention,omitempty"` +} +type BackupSchedule struct { + // Cron expression for backup schedule + // +optional + CronExpression string `json:"cronExpression,omitempty"` + // Backup Retention + // +optional + BackupRetention *int32 `json:"backupRetention,omitempty"` + BackupType string `json:"type,omitempty"` // History Limit of job // +optional - // +kubebuilder:default:=3 - HistoryLimit *int32 `json:"historyLimit,omitempty"` + BackupJobHistoryLimit *int32 `json:"jobhistoryLimit,omitempty"` } -// BackupStatus defines the observed state of Backup type BackupStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - // +kubebuilder:default:=false - Completed bool `json:"completed"` - // Get the backup path. 
+ Type BackupInitiator `json:"type,omitempty"` + BackupName string `json:"backupName,omitempty"` + BackupSize string `json:"backupSize,omitempty"` + BackupType string `json:"backupType,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + State BackupConditionType `json:"state,omitempty"` + ManualBackup *ManualBackupStatus `json:"manual,omitempty"` + ScheduledBackups []ScheduledBackupStatus `json:"scheduled,omitempty"` +} + +type BackupConditionType string + +const ( + // BackupComplete means the backup has finished his execution + BackupSucceeded BackupConditionType = "Succeeded" + // BackupFailed means backup has failed + BackupFailed BackupConditionType = "Failed" + BackupStart BackupConditionType = "Started" + BackupActive BackupConditionType = "Active" +) + +type BackupInitiator string + +const ( + CronJobBackupInitiator BackupInitiator = "CronJob" + ManualBackupInitiator BackupInitiator = "Manual" +) + +type ManualBackupStatus struct { + // Specifies whether or not the Job is finished executing (does not indicate success or + // failure). + // +kubebuilder:validation:Required + Finished bool `json:"finished"` BackupName string `json:"backupName,omitempty"` // Get the backup Date - BackupDate string `json:"backupDate,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + // Get the backup Type + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + // Conditions represents the backup resource conditions list. + // +optional + Succeeded int32 `json:"succeeded,omitempty"` + // +optional + // The number of actively running manual backup Pods. 
+	// +optional
+	Active int32 `json:"active,omitempty"`
+	Failed int32 `json:"failed,omitempty"`
+	Reason string `json:"reason"`
 	// Get the backup Type
 	BackupType string `json:"backupType,omitempty"`
+	// Get the backup Size
+	BackupSize string `json:"backupSize,omitempty"`
+	// Get current backup status
+	State BackupConditionType `json:"state,omitempty"`
+}
+
+type ScheduledBackupStatus struct {
+	// The name of the associated scheduled backup CronJob
+	// +kubebuilder:validation:Required
+	CronJobName string `json:"cronJobName,omitempty"`
+	// Get the backup path.
+	BackupName string `json:"backupName,omitempty"`
+	// Specifies whether or not the Job is finished executing (does not indicate success or
+	// failure).
+	// +kubebuilder:validation:Required
+	Finished bool `json:"finished"`
+	// Get the backup Type
+	BackupType string `json:"backupType,omitempty"`
+	// Get the backup start time
+	StartTime *metav1.Time `json:"startTime,omitempty"`
+	// Get the backup completion time
+	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
 	// Conditions represents the backup resource conditions list. 
- Conditions []BackupCondition `json:"conditions,omitempty"` + // +optional + Succeeded int32 `json:"succeeded,omitempty"` + // +optional + Failed int32 `json:"failed,omitempty"` + Reason string `json:"reason"` + // Get the backup Size + BackupSize string `json:"backupSize,omitempty"` + // Get current backup status + State BackupConditionType `json:"state,omitempty"` } // +kubebuilder:object:root=true @@ -75,9 +173,13 @@ type BackupStatus struct { // +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="BackupName",type="string",JSONPath=".status.backupName",description="The Backup name" -// +kubebuilder:printcolumn:name="BackupDate",type="string",JSONPath=".status.backupDate",description="The Backup Date time" +// +kubebuilder:printcolumn:name="StartTime",type="string",JSONPath=".status.startTime",description="The Backup Start time" +// +kubebuilder:printcolumn:name="completionTime",type="string",JSONPath=".status.completionTime",description="The Backup CompletionTime time" // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".status.backupType",description="The Backup Type" -// +kubebuilder:printcolumn:name="Success",type="string",JSONPath=".status.conditions[?(@.type==\"Complete\")].status",description="Whether the backup Success?" 
+// +kubebuilder:printcolumn:name="Initiator",type="string",JSONPath=".status.type",description="The Backup Initiator"
+// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="The Backup State"
+// +kubebuilder:printcolumn:name="Size",type="string",JSONPath=".status.backupSize",description="The Backup Size"
+
 // Backup is the Schema for the backups API
 type Backup struct {
 	metav1.TypeMeta   `json:",inline"`
@@ -87,31 +189,6 @@ type Backup struct {
 	Status BackupStatus `json:"status,omitempty"`
 }
-// BackupCondition defines condition struct for backup resource
-type BackupCondition struct {
-	// type of cluster condition, values in (\"Ready\")
-	Type BackupConditionType `json:"type"`
-	// Status of the condition, one of (\"True\", \"False\", \"Unknown\")
-	Status corev1.ConditionStatus `json:"status"`
-	// LastTransitionTime
-	LastTransitionTime metav1.Time `json:"lastTransitionTime"`
-	// Reason
-	Reason string `json:"reason"`
-	// Message
-	Message string `json:"message"`
-}
-
-// BackupConditionType defines condition types of a backup resources
-type BackupConditionType string
-
-const (
-	// BackupComplete means the backup has finished his execution
-	BackupComplete BackupConditionType = "Complete"
-	// BackupFailed means backup has failed
-	BackupFailed BackupConditionType = "Failed"
-	BackupStart BackupConditionType = "Started"
-)
-
 //+kubebuilder:object:root=true
 // BackupList contains a list of Backup
diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go
new file mode 100644
index 00000000..e462594e
--- /dev/null
+++ b/api/v1beta1/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2021 RadonDB.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1 +package v1beta1 diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go index 994ea1a2..a039894c 100644 --- a/api/v1beta1/groupversion_info.go +++ b/api/v1beta1/groupversion_info.go @@ -32,5 +32,6 @@ var ( SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme + AddToScheme = SchemeBuilder.AddToScheme + localSchemeBuilder = &SchemeBuilder.SchemeBuilder ) diff --git a/api/v1beta1/mysqlcluster_conversion.go b/api/v1beta1/mysqlcluster_conversion.go index 68efb6e3..3e961a6e 100644 --- a/api/v1beta1/mysqlcluster_conversion.go +++ b/api/v1beta1/mysqlcluster_conversion.go @@ -1,35 +1,173 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package v1beta1 import ( "unsafe" "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" ) var _ conversion.Convertible = &MysqlCluster{} +var _ conversion.Convertible = &Backup{} + func (src *MysqlCluster) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*v1alpha1.MysqlCluster) - // ObjectMeta - dst.ObjectMeta = src.ObjectMeta - dst.Spec.MysqlVersion = src.Spec.MysqlVersion - dst.Status.Conditions = *(*[]v1alpha1.ClusterCondition)(unsafe.Pointer(&src.Status.Conditions)) - - dst.Status.ReadyNodes = src.Status.ReadyNodes - dst.Status.State = v1alpha1.ClusterState(src.Status.State) - dst.Status.Nodes = *(*[]v1alpha1.NodeStatus)(unsafe.Pointer(&src.Status.Nodes)) - return nil + return Convert_v1beta1_MysqlCluster_To_v1alpha1_MysqlCluster(src, dst, nil) } + func (dst *MysqlCluster) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*v1alpha1.MysqlCluster) - // ObjectMeta - dst.ObjectMeta = src.ObjectMeta - dst.Spec.MysqlVersion = src.Spec.MysqlVersion - - dst.Status.Conditions = *(*[]ClusterCondition)(unsafe.Pointer(&src.Status.Conditions)) - dst.Status.ReadyNodes = src.Status.ReadyNodes - dst.Status.State = ClusterState(src.Status.State) - dst.Status.Nodes = *(*[]NodeStatus)(unsafe.Pointer(&src.Status.Nodes)) + return Convert_v1alpha1_MysqlCluster_To_v1beta1_MysqlCluster(src, dst, nil) +} + +func (src *Backup) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*v1alpha1.Backup) + return Convert_v1beta1_Backup_To_v1alpha1_Backup(src, dst, nil) +} + +func (dst *Backup) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*v1alpha1.Backup) + return Convert_v1alpha1_Backup_To_v1beta1_Backup(src, dst, nil) +} + +func Convert_v1alpha1_MysqlClusterSpec_To_v1beta1_MysqlClusterSpec(in *v1alpha1.MysqlClusterSpec, out *MysqlClusterSpec, s apiconversion.Scope) error 
{ + if err := autoConvert_v1alpha1_MysqlClusterSpec_To_v1beta1_MysqlClusterSpec(in, out, s); err != nil { + return err + } + // if err := Convert_v1alpha1_MysqlOpts_To_v1beta1_MysqlConfig(in, out, s); err != nil { + // return err + // } + //TODO in.MysqlOpts.Database in.MysqlOpts.InitTokuDB + out.Image = in.MysqlOpts.Image + out.MaxLagSeconds = in.MysqlOpts.MaxLagSeconds + out.MySQLConfig.ConfigMapName = in.MysqlOpts.MysqlConfTemplate + out.MySQLConfig.MysqlConfig = *(*map[string]string)(unsafe.Pointer(&in.MysqlOpts.MysqlConf)) + out.MySQLConfig.PluginConfig = *(*map[string]string)(unsafe.Pointer(&in.MysqlOpts.PluginConf)) + //TODO in.MysqlOpts.Password in.MysqlOpts.PluginConf in.MysqlOpts.RootHost + out.Resources = in.MysqlOpts.Resources + out.User = in.MysqlOpts.User + out.Xenon = XenonOpts(in.XenonOpts) + out.Monitoring.Exporter.Image = in.MetricsOpts.Image + out.Monitoring.Exporter.Enabled = in.MetricsOpts.Enabled + out.Monitoring.Exporter.Resources = in.MetricsOpts.Resources + out.Affinity = (*corev1.Affinity)(unsafe.Pointer(in.PodPolicy.Affinity)) + out.ImagePullPolicy = in.PodPolicy.ImagePullPolicy + out.Backup.Image = in.PodPolicy.SidecarImage + out.Backup.Resources = in.PodPolicy.ExtraResources + out.Log.SlowLogTail = in.PodPolicy.SlowLogTail + out.Tolerations = in.PodPolicy.Tolerations + out.PriorityClassName = in.PodPolicy.PriorityClassName + out.Log.BusyboxImage = in.PodPolicy.BusyboxImage + out.Log.Resources = in.PodPolicy.ExtraResources + out.Storage.AccessModes = in.Persistence.AccessModes + out.Storage.Resources.Requests = map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: resource.MustParse(in.Persistence.Size), + } + out.DataSource.S3Backup.Name = in.RestoreFrom + out.DataSource.S3Backup.SecretName = in.BackupSecretName + if in.TlsSecretName != "" { + out.CustomTLSSecret = &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: in.TlsSecretName, + }, + } + } + + //TODO in.Backup + + return nil +} + 
+func Convert_v1beta1_MysqlClusterSpec_To_v1alpha1_MysqlClusterSpec(in *MysqlClusterSpec, out *v1alpha1.MysqlClusterSpec, s apiconversion.Scope) error { + if err := autoConvert_v1beta1_MysqlClusterSpec_To_v1alpha1_MysqlClusterSpec(in, out, s); err != nil { + return err + } + out.MysqlOpts.User = in.User + out.MysqlOpts.MysqlConfTemplate = in.MySQLConfig.ConfigMapName + out.MysqlOpts.MysqlConf = in.MySQLConfig.MysqlConfig + out.MysqlOpts.PluginConf = in.MySQLConfig.PluginConfig + out.MysqlOpts.Resources = in.Resources + if in.CustomTLSSecret != nil { + out.TlsSecretName = in.CustomTLSSecret.Name + } + out.Persistence.StorageClass = in.Storage.StorageClassName + out.Persistence.Size = FormatQuantity(in.Storage.Resources.Requests[corev1.ResourceStorage]) + out.Persistence.AccessModes = in.Storage.AccessModes + out.XenonOpts = v1alpha1.XenonOpts(in.Xenon) + // //TODO in.Backup + out.PodPolicy.ExtraResources = in.Backup.Resources + out.PodPolicy.SidecarImage = in.Backup.Image + out.MetricsOpts.Image = in.Monitoring.Exporter.Image + out.MetricsOpts.Resources = in.Monitoring.Exporter.Resources + out.MysqlOpts.Image = in.Image + out.PodPolicy.SlowLogTail = in.Log.SlowLogTail + out.PodPolicy.BusyboxImage = in.Log.BusyboxImage + out.MetricsOpts.Enabled = in.Monitoring.Exporter.Enabled + out.PodPolicy.ImagePullPolicy = in.ImagePullPolicy + out.PodPolicy.Tolerations = in.Tolerations + out.PodPolicy.Affinity = (*corev1.Affinity)(unsafe.Pointer(in.Affinity)) + out.PodPolicy.PriorityClassName = in.PriorityClassName + //TODO in.DataSource in.Standby + out.XenonOpts.EnableAutoRebuild = in.EnableAutoRebuild + + out.RestoreFrom = in.DataSource.S3Backup.Name + out.BackupSecretName = in.DataSource.S3Backup.SecretName + + //TODO in.Log n.Service + return nil +} + +func Convert_v1alpha1_BackupSpec_To_v1beta1_BackupSpec(in *v1alpha1.BackupSpec, out *BackupSpec, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_BackupSpec_To_v1beta1_BackupSpec(in, out, s); err != nil { + 
return err + } + return nil +} + +func Convert_v1beta1_BackupSpec_To_v1alpha1_BackupSpec(in *BackupSpec, out *v1alpha1.BackupSpec, s apiconversion.Scope) error { + if err := autoConvert_v1beta1_BackupSpec_To_v1alpha1_BackupSpec(in, out, s); err != nil { + return err + } return nil } + +func Convert_v1beta1_BackupStatus_To_v1alpha1_BackupStatus(in *BackupStatus, out *v1alpha1.BackupStatus, s apiconversion.Scope) error { + if err := autoConvert_v1beta1_BackupStatus_To_v1alpha1_BackupStatus(in, out, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_BackupStatus_To_v1beta1_BackupStatus(in *v1alpha1.BackupStatus, out *BackupStatus, s apiconversion.Scope) error { + if err := autoConvert_v1alpha1_BackupStatus_To_v1beta1_BackupStatus(in, out, s); err != nil { + return err + } + return nil +} + +func FormatQuantity(q resource.Quantity) string { + if q.IsZero() { + return "" + } + return q.String() +} diff --git a/api/v1beta1/mysqlcluster_types.go b/api/v1beta1/mysqlcluster_types.go index be779c21..313d1832 100644 --- a/api/v1beta1/mysqlcluster_types.go +++ b/api/v1beta1/mysqlcluster_types.go @@ -44,7 +44,7 @@ type MysqlClusterSpec struct { // MySQLConfig `ConfigMap` name of MySQL config. // +optional - MySQLConfig *string `json:"mysqlConfig,omitempty"` + MySQLConfig MySQLConfigs `json:"mysqlConfig,omitempty"` //Compute resources of a MySQL container. Resources corev1.ResourceRequirements `json:"resources,omitempty"` @@ -65,12 +65,6 @@ type MysqlClusterSpec struct { // +kubebuilder:default:="5.7" MysqlVersion string `json:"mysqlVersion,omitempty"` - // DatabaseInitSQL defines a ConfigMap containing custom SQL that will - // be run after the cluster is initialized. This ConfigMap must be in the same - // namespace as the cluster. - // +optional - DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` - // XenonOpts is the options of xenon container. 
// +optional // +kubebuilder:default:={image: "radondb/xenon:v2.3.0", admitDefeatHearbeatCount: 5, electionTimeout: 10000, resources: {limits: {cpu: "100m", memory: "256Mi"}, requests: {cpu: "50m", memory: "128Mi"}}} @@ -82,7 +76,19 @@ type MysqlClusterSpec struct { // Monitoring is the options of metrics container. // +optional - Monitoring MonitoringSpec `json:"MonitoringSpec,omitempty"` + Monitoring MonitoringSpec `json:"monitoringSpec,omitempty"` + + // Specifies mysql image to use. + // +optional + // +kubebuilder:default:="percona/percona-server:5.7.34" + Image string `json:"image,omitempty"` + + // MaxLagSeconds configures the readiness probe of mysqld container + // if the replication lag is greater than MaxLagSeconds, the mysqld container will not be not healthy. + // +kubebuilder:default:=30 + // +kubebuilder:validation:Minimum=0 + // +optional + MaxLagSeconds int `json:"maxLagTime,omitempty"` // ImagePullPolicy is used to determine when Kubernetes will attempt to // pull (download) container images. @@ -106,7 +112,7 @@ type MysqlClusterSpec struct { // MySQL to restart. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ // +optional - PriorityClassName *string `json:"priorityClassName,omitempty"` + PriorityClassName string `json:"priorityClassName,omitempty"` // The number of pods from that set that must still be available after the // eviction, even in the absence of the evicted pod @@ -116,7 +122,7 @@ type MysqlClusterSpec struct { // Specifies a data source for bootstrapping the MySQL cluster. // +optional - DataSource *DataSource `json:"dataSource,omitempty"` + DataSource DataSource `json:"dataSource,omitempty"` // Run this cluster as a read-only copy of an existing cluster or archive. // +optional @@ -140,6 +146,17 @@ type MysqlClusterSpec struct { Service *ServiceSpec `json:"service,omitempty"` } +type MySQLConfigs struct { + // Name of the `ConfigMap` containing MySQL config. 
+ // +optional + ConfigMapName string `json:"configMapName,omitempty"` + + // A map[string]string that will be passed to my.cnf file. + // The key/value pairs is persisted in the configmap. + MysqlConfig map[string]string `json:"myCnf,omitempty"` + PluginConfig map[string]string `json:"pluginCnf,omitempty"` +} + type BackupOpts struct { // Image is the image of backup container. // +optional @@ -183,7 +200,7 @@ type MysqlCluster struct { Status MysqlClusterStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // MysqlClusterList contains a list of MysqlCluster type MysqlClusterList struct { metav1.TypeMeta `json:",inline"` @@ -299,19 +316,6 @@ const ( NodeConditionReplicating NodeConditionType = "Replicating" ) -// DatabaseInitSQL defines a ConfigMap containing custom SQL that will -// be run after the cluster is initialized. This ConfigMap must be in the same -// namespace as the cluster. -type DatabaseInitSQL struct { - // Name is the name of a ConfigMap - // +required - Name string `json:"name"` - - // Key is the ConfigMap data key that points to a SQL string - // +required - Key string `json:"key"` -} - type XenonOpts struct { // To specify the image that will be used for xenon container. // +optional @@ -341,7 +345,7 @@ type XenonOpts struct { type MonitoringSpec struct { // +optional - Exporter *ExporterSpec `json:"exporter,omitempty"` + Exporter ExporterSpec `json:"exporter,omitempty"` } type ExporterSpec struct { @@ -360,12 +364,19 @@ type ExporterSpec struct { // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers // +optional Resources corev1.ResourceRequirements `json:"resources,omitempty"` + // enabled is used to enable/disable the exporter. 
+ // +optional + // +kubebuilder:default:=true + Enabled bool `json:"enabled,omitempty"` } type DataSource struct { // Bootstraping from remote data source // +optional - Remote *RemoteDataSource `json:"remote,omitempty"` + Remote RemoteDataSource `json:"remote,omitempty"` + // Bootstraping from backup + // +optional + S3Backup S3BackupDataSource `json:"S3backup,omitempty"` } type RemoteDataSource struct { @@ -373,10 +384,34 @@ type RemoteDataSource struct { SourceConfig *corev1.SecretProjection `json:"sourceConfig,omitempty"` } +type S3BackupDataSource struct { + // Backup name + // +optional + Name string `json:"name"` + // Secret name + // +optional + SecretName string `json:"secretName"` +} type LogOpts struct { // To specify the image that will be used for log container. // +optional + // The busybox image. + // +optional + // +kubebuilder:default:="busybox:1.32" + BusyboxImage string `json:"image,omitempty"` + // SlowLogTail represents if tail the mysql slow log. + // +optional + // +kubebuilder:default:=false + SlowLogTail bool `json:"slowLogTail,omitempty"` + + // AuditLogTail represents if tail the mysql audit log. + // +optional + // +kubebuilder:default:=false + AuditLogTail bool `json:"auditLogTail,omitempty"` + + //Log container resources of a MySQL container. + Resources corev1.ResourceRequirements `json:"resources,omitempty"` } type MySQLStandbySpec struct { diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go new file mode 100644 index 00000000..00d1e6c3 --- /dev/null +++ b/api/v1beta1/zz_generated.conversion.go @@ -0,0 +1,563 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1alpha1 "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Backup)(nil), (*v1alpha1.Backup)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Backup_To_v1alpha1_Backup(a.(*Backup), b.(*v1alpha1.Backup), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.Backup)(nil), (*Backup)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Backup_To_v1beta1_Backup(a.(*v1alpha1.Backup), b.(*Backup), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*BackupList)(nil), (*v1alpha1.BackupList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupList_To_v1alpha1_BackupList(a.(*BackupList), b.(*v1alpha1.BackupList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.BackupList)(nil), (*BackupList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1alpha1_BackupList_To_v1beta1_BackupList(a.(*v1alpha1.BackupList), b.(*BackupList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClusterCondition)(nil), (*v1alpha1.ClusterCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClusterCondition_To_v1alpha1_ClusterCondition(a.(*ClusterCondition), b.(*v1alpha1.ClusterCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCondition)(nil), (*ClusterCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterCondition_To_v1beta1_ClusterCondition(a.(*v1alpha1.ClusterCondition), b.(*ClusterCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MysqlCluster)(nil), (*v1alpha1.MysqlCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MysqlCluster_To_v1alpha1_MysqlCluster(a.(*MysqlCluster), b.(*v1alpha1.MysqlCluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.MysqlCluster)(nil), (*MysqlCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_MysqlCluster_To_v1beta1_MysqlCluster(a.(*v1alpha1.MysqlCluster), b.(*MysqlCluster), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MysqlClusterList)(nil), (*v1alpha1.MysqlClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MysqlClusterList_To_v1alpha1_MysqlClusterList(a.(*MysqlClusterList), b.(*v1alpha1.MysqlClusterList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.MysqlClusterList)(nil), (*MysqlClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_MysqlClusterList_To_v1beta1_MysqlClusterList(a.(*v1alpha1.MysqlClusterList), 
b.(*MysqlClusterList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*MysqlClusterStatus)(nil), (*v1alpha1.MysqlClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MysqlClusterStatus_To_v1alpha1_MysqlClusterStatus(a.(*MysqlClusterStatus), b.(*v1alpha1.MysqlClusterStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.MysqlClusterStatus)(nil), (*MysqlClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_MysqlClusterStatus_To_v1beta1_MysqlClusterStatus(a.(*v1alpha1.MysqlClusterStatus), b.(*MysqlClusterStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeCondition)(nil), (*v1alpha1.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeCondition_To_v1alpha1_NodeCondition(a.(*NodeCondition), b.(*v1alpha1.NodeCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.NodeCondition)(nil), (*NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeCondition_To_v1beta1_NodeCondition(a.(*v1alpha1.NodeCondition), b.(*NodeCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeStatus)(nil), (*v1alpha1.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeStatus_To_v1alpha1_NodeStatus(a.(*NodeStatus), b.(*v1alpha1.NodeStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.NodeStatus)(nil), (*NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeStatus_To_v1beta1_NodeStatus(a.(*v1alpha1.NodeStatus), b.(*NodeStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*RaftStatus)(nil), 
(*v1alpha1.RaftStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_RaftStatus_To_v1alpha1_RaftStatus(a.(*RaftStatus), b.(*v1alpha1.RaftStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.RaftStatus)(nil), (*RaftStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_RaftStatus_To_v1beta1_RaftStatus(a.(*v1alpha1.RaftStatus), b.(*RaftStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*XenonOpts)(nil), (*v1alpha1.XenonOpts)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_XenonOpts_To_v1alpha1_XenonOpts(a.(*XenonOpts), b.(*v1alpha1.XenonOpts), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.XenonOpts)(nil), (*XenonOpts)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_XenonOpts_To_v1beta1_XenonOpts(a.(*v1alpha1.XenonOpts), b.(*XenonOpts), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha1.BackupSpec)(nil), (*BackupSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupSpec_To_v1beta1_BackupSpec(a.(*v1alpha1.BackupSpec), b.(*BackupSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha1.BackupStatus)(nil), (*BackupStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_BackupStatus_To_v1beta1_BackupStatus(a.(*v1alpha1.BackupStatus), b.(*BackupStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1alpha1.MysqlClusterSpec)(nil), (*MysqlClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_MysqlClusterSpec_To_v1beta1_MysqlClusterSpec(a.(*v1alpha1.MysqlClusterSpec), b.(*MysqlClusterSpec), scope) + }); err != nil { + return err + } + if err := 
s.AddConversionFunc((*BackupSpec)(nil), (*v1alpha1.BackupSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupSpec_To_v1alpha1_BackupSpec(a.(*BackupSpec), b.(*v1alpha1.BackupSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*BackupStatus)(nil), (*v1alpha1.BackupStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_BackupStatus_To_v1alpha1_BackupStatus(a.(*BackupStatus), b.(*v1alpha1.BackupStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*MysqlClusterSpec)(nil), (*v1alpha1.MysqlClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_MysqlClusterSpec_To_v1alpha1_MysqlClusterSpec(a.(*MysqlClusterSpec), b.(*v1alpha1.MysqlClusterSpec), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Backup_To_v1alpha1_Backup(in *Backup, out *v1alpha1.Backup, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_BackupSpec_To_v1alpha1_BackupSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_BackupStatus_To_v1alpha1_BackupStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Backup_To_v1alpha1_Backup is an autogenerated conversion function. 
+func Convert_v1beta1_Backup_To_v1alpha1_Backup(in *Backup, out *v1alpha1.Backup, s conversion.Scope) error { + return autoConvert_v1beta1_Backup_To_v1alpha1_Backup(in, out, s) +} + +func autoConvert_v1alpha1_Backup_To_v1beta1_Backup(in *v1alpha1.Backup, out *Backup, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_BackupSpec_To_v1beta1_BackupSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_BackupStatus_To_v1beta1_BackupStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_Backup_To_v1beta1_Backup is an autogenerated conversion function. +func Convert_v1alpha1_Backup_To_v1beta1_Backup(in *v1alpha1.Backup, out *Backup, s conversion.Scope) error { + return autoConvert_v1alpha1_Backup_To_v1beta1_Backup(in, out, s) +} + +func autoConvert_v1beta1_BackupList_To_v1alpha1_BackupList(in *BackupList, out *v1alpha1.BackupList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha1.Backup, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Backup_To_v1alpha1_Backup(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_BackupList_To_v1alpha1_BackupList is an autogenerated conversion function. 
+func Convert_v1beta1_BackupList_To_v1alpha1_BackupList(in *BackupList, out *v1alpha1.BackupList, s conversion.Scope) error { + return autoConvert_v1beta1_BackupList_To_v1alpha1_BackupList(in, out, s) +} + +func autoConvert_v1alpha1_BackupList_To_v1beta1_BackupList(in *v1alpha1.BackupList, out *BackupList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Backup_To_v1beta1_Backup(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_BackupList_To_v1beta1_BackupList is an autogenerated conversion function. +func Convert_v1alpha1_BackupList_To_v1beta1_BackupList(in *v1alpha1.BackupList, out *BackupList, s conversion.Scope) error { + return autoConvert_v1alpha1_BackupList_To_v1beta1_BackupList(in, out, s) +} + +func autoConvert_v1beta1_BackupSpec_To_v1alpha1_BackupSpec(in *BackupSpec, out *v1alpha1.BackupSpec, s conversion.Scope) error { + out.ClusterName = in.ClusterName + // WARNING: in.BackupMethod requires manual conversion: does not exist in peer-type + // WARNING: in.Manual requires manual conversion: does not exist in peer-type + // WARNING: in.BackupSchedule requires manual conversion: does not exist in peer-type + // WARNING: in.BackupOpts requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1alpha1_BackupSpec_To_v1beta1_BackupSpec(in *v1alpha1.BackupSpec, out *BackupSpec, s conversion.Scope) error { + // WARNING: in.Image requires manual conversion: does not exist in peer-type + // WARNING: in.HostName requires manual conversion: does not exist in peer-type + // WARNING: in.NFSServerAddress requires manual conversion: does not exist in peer-type + out.ClusterName = in.ClusterName + // WARNING: in.HistoryLimit requires manual conversion: does not exist in peer-type + return nil +} + +func 
autoConvert_v1beta1_BackupStatus_To_v1alpha1_BackupStatus(in *BackupStatus, out *v1alpha1.BackupStatus, s conversion.Scope) error { + // WARNING: in.Type requires manual conversion: does not exist in peer-type + out.BackupName = in.BackupName + // WARNING: in.BackupSize requires manual conversion: does not exist in peer-type + out.BackupType = in.BackupType + // WARNING: in.StartTime requires manual conversion: does not exist in peer-type + // WARNING: in.CompletionTime requires manual conversion: does not exist in peer-type + // WARNING: in.State requires manual conversion: does not exist in peer-type + // WARNING: in.ManualBackup requires manual conversion: does not exist in peer-type + // WARNING: in.ScheduledBackups requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1alpha1_BackupStatus_To_v1beta1_BackupStatus(in *v1alpha1.BackupStatus, out *BackupStatus, s conversion.Scope) error { + // WARNING: in.Completed requires manual conversion: does not exist in peer-type + out.BackupName = in.BackupName + // WARNING: in.BackupDate requires manual conversion: does not exist in peer-type + out.BackupType = in.BackupType + // WARNING: in.Conditions requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1beta1_ClusterCondition_To_v1alpha1_ClusterCondition(in *ClusterCondition, out *v1alpha1.ClusterCondition, s conversion.Scope) error { + out.Type = v1alpha1.ClusterConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta1_ClusterCondition_To_v1alpha1_ClusterCondition is an autogenerated conversion function. 
+func Convert_v1beta1_ClusterCondition_To_v1alpha1_ClusterCondition(in *ClusterCondition, out *v1alpha1.ClusterCondition, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterCondition_To_v1alpha1_ClusterCondition(in, out, s) +} + +func autoConvert_v1alpha1_ClusterCondition_To_v1beta1_ClusterCondition(in *v1alpha1.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { + out.Type = ClusterConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1alpha1_ClusterCondition_To_v1beta1_ClusterCondition is an autogenerated conversion function. +func Convert_v1alpha1_ClusterCondition_To_v1beta1_ClusterCondition(in *v1alpha1.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s) +} + +func autoConvert_v1beta1_MysqlCluster_To_v1alpha1_MysqlCluster(in *MysqlCluster, out *v1alpha1.MysqlCluster, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_MysqlClusterSpec_To_v1alpha1_MysqlClusterSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_MysqlClusterStatus_To_v1alpha1_MysqlClusterStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_MysqlCluster_To_v1alpha1_MysqlCluster is an autogenerated conversion function. 
+func Convert_v1beta1_MysqlCluster_To_v1alpha1_MysqlCluster(in *MysqlCluster, out *v1alpha1.MysqlCluster, s conversion.Scope) error { + return autoConvert_v1beta1_MysqlCluster_To_v1alpha1_MysqlCluster(in, out, s) +} + +func autoConvert_v1alpha1_MysqlCluster_To_v1beta1_MysqlCluster(in *v1alpha1.MysqlCluster, out *MysqlCluster, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_MysqlClusterSpec_To_v1beta1_MysqlClusterSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_MysqlClusterStatus_To_v1beta1_MysqlClusterStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_MysqlCluster_To_v1beta1_MysqlCluster is an autogenerated conversion function. +func Convert_v1alpha1_MysqlCluster_To_v1beta1_MysqlCluster(in *v1alpha1.MysqlCluster, out *MysqlCluster, s conversion.Scope) error { + return autoConvert_v1alpha1_MysqlCluster_To_v1beta1_MysqlCluster(in, out, s) +} + +func autoConvert_v1beta1_MysqlClusterList_To_v1alpha1_MysqlClusterList(in *MysqlClusterList, out *v1alpha1.MysqlClusterList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha1.MysqlCluster, len(*in)) + for i := range *in { + if err := Convert_v1beta1_MysqlCluster_To_v1alpha1_MysqlCluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_MysqlClusterList_To_v1alpha1_MysqlClusterList is an autogenerated conversion function. 
+func Convert_v1beta1_MysqlClusterList_To_v1alpha1_MysqlClusterList(in *MysqlClusterList, out *v1alpha1.MysqlClusterList, s conversion.Scope) error { + return autoConvert_v1beta1_MysqlClusterList_To_v1alpha1_MysqlClusterList(in, out, s) +} + +func autoConvert_v1alpha1_MysqlClusterList_To_v1beta1_MysqlClusterList(in *v1alpha1.MysqlClusterList, out *MysqlClusterList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MysqlCluster, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_MysqlCluster_To_v1beta1_MysqlCluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1alpha1_MysqlClusterList_To_v1beta1_MysqlClusterList is an autogenerated conversion function. +func Convert_v1alpha1_MysqlClusterList_To_v1beta1_MysqlClusterList(in *v1alpha1.MysqlClusterList, out *MysqlClusterList, s conversion.Scope) error { + return autoConvert_v1alpha1_MysqlClusterList_To_v1beta1_MysqlClusterList(in, out, s) +} + +func autoConvert_v1beta1_MysqlClusterSpec_To_v1alpha1_MysqlClusterSpec(in *MysqlClusterSpec, out *v1alpha1.MysqlClusterSpec, s conversion.Scope) error { + out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) + // WARNING: in.User requires manual conversion: does not exist in peer-type + // WARNING: in.MySQLConfig requires manual conversion: does not exist in peer-type + // WARNING: in.Resources requires manual conversion: does not exist in peer-type + // WARNING: in.CustomTLSSecret requires manual conversion: does not exist in peer-type + // WARNING: in.Storage requires manual conversion: does not exist in peer-type + out.MysqlVersion = in.MysqlVersion + // WARNING: in.Xenon requires manual conversion: does not exist in peer-type + // WARNING: in.Backup requires manual conversion: does not exist in peer-type + // WARNING: in.Monitoring requires manual conversion: does not exist in peer-type + // WARNING: 
in.Image requires manual conversion: does not exist in peer-type + // WARNING: in.MaxLagSeconds requires manual conversion: does not exist in peer-type + // WARNING: in.ImagePullPolicy requires manual conversion: does not exist in peer-type + // WARNING: in.Tolerations requires manual conversion: does not exist in peer-type + // WARNING: in.Affinity requires manual conversion: does not exist in peer-type + // WARNING: in.PriorityClassName requires manual conversion: does not exist in peer-type + out.MinAvailable = in.MinAvailable + // WARNING: in.DataSource requires manual conversion: does not exist in peer-type + // WARNING: in.Standby requires manual conversion: does not exist in peer-type + // WARNING: in.EnableAutoRebuild requires manual conversion: does not exist in peer-type + // WARNING: in.Log requires manual conversion: does not exist in peer-type + // WARNING: in.Service requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1alpha1_MysqlClusterSpec_To_v1beta1_MysqlClusterSpec(in *v1alpha1.MysqlClusterSpec, out *MysqlClusterSpec, s conversion.Scope) error { + out.Replicas = (*int32)(unsafe.Pointer(in.Replicas)) + out.MinAvailable = in.MinAvailable + // WARNING: in.MysqlOpts requires manual conversion: does not exist in peer-type + // WARNING: in.XenonOpts requires manual conversion: does not exist in peer-type + // WARNING: in.MetricsOpts requires manual conversion: does not exist in peer-type + out.MysqlVersion = in.MysqlVersion + // WARNING: in.PodPolicy requires manual conversion: does not exist in peer-type + // WARNING: in.Persistence requires manual conversion: does not exist in peer-type + // WARNING: in.BackupSecretName requires manual conversion: does not exist in peer-type + // WARNING: in.RestoreFrom requires manual conversion: does not exist in peer-type + // WARNING: in.NFSServerAddress requires manual conversion: does not exist in peer-type + // WARNING: in.BackupSchedule requires manual conversion: does 
not exist in peer-type + // WARNING: in.BothS3NFS requires manual conversion: does not exist in peer-type + // WARNING: in.BackupScheduleJobsHistoryLimit requires manual conversion: does not exist in peer-type + // WARNING: in.TlsSecretName requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1beta1_MysqlClusterStatus_To_v1alpha1_MysqlClusterStatus(in *MysqlClusterStatus, out *v1alpha1.MysqlClusterStatus, s conversion.Scope) error { + out.ReadyNodes = in.ReadyNodes + out.State = v1alpha1.ClusterState(in.State) + out.Conditions = *(*[]v1alpha1.ClusterCondition)(unsafe.Pointer(&in.Conditions)) + out.Nodes = *(*[]v1alpha1.NodeStatus)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1beta1_MysqlClusterStatus_To_v1alpha1_MysqlClusterStatus is an autogenerated conversion function. +func Convert_v1beta1_MysqlClusterStatus_To_v1alpha1_MysqlClusterStatus(in *MysqlClusterStatus, out *v1alpha1.MysqlClusterStatus, s conversion.Scope) error { + return autoConvert_v1beta1_MysqlClusterStatus_To_v1alpha1_MysqlClusterStatus(in, out, s) +} + +func autoConvert_v1alpha1_MysqlClusterStatus_To_v1beta1_MysqlClusterStatus(in *v1alpha1.MysqlClusterStatus, out *MysqlClusterStatus, s conversion.Scope) error { + out.ReadyNodes = in.ReadyNodes + out.State = ClusterState(in.State) + out.Conditions = *(*[]ClusterCondition)(unsafe.Pointer(&in.Conditions)) + out.Nodes = *(*[]NodeStatus)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1alpha1_MysqlClusterStatus_To_v1beta1_MysqlClusterStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_MysqlClusterStatus_To_v1beta1_MysqlClusterStatus(in *v1alpha1.MysqlClusterStatus, out *MysqlClusterStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_MysqlClusterStatus_To_v1beta1_MysqlClusterStatus(in, out, s) +} + +func autoConvert_v1beta1_NodeCondition_To_v1alpha1_NodeCondition(in *NodeCondition, out *v1alpha1.NodeCondition, s conversion.Scope) error { + out.Type = v1alpha1.NodeConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + return nil +} + +// Convert_v1beta1_NodeCondition_To_v1alpha1_NodeCondition is an autogenerated conversion function. +func Convert_v1beta1_NodeCondition_To_v1alpha1_NodeCondition(in *NodeCondition, out *v1alpha1.NodeCondition, s conversion.Scope) error { + return autoConvert_v1beta1_NodeCondition_To_v1alpha1_NodeCondition(in, out, s) +} + +func autoConvert_v1alpha1_NodeCondition_To_v1beta1_NodeCondition(in *v1alpha1.NodeCondition, out *NodeCondition, s conversion.Scope) error { + out.Type = NodeConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + return nil +} + +// Convert_v1alpha1_NodeCondition_To_v1beta1_NodeCondition is an autogenerated conversion function. 
+func Convert_v1alpha1_NodeCondition_To_v1beta1_NodeCondition(in *v1alpha1.NodeCondition, out *NodeCondition, s conversion.Scope) error { + return autoConvert_v1alpha1_NodeCondition_To_v1beta1_NodeCondition(in, out, s) +} + +func autoConvert_v1beta1_NodeStatus_To_v1alpha1_NodeStatus(in *NodeStatus, out *v1alpha1.NodeStatus, s conversion.Scope) error { + out.Name = in.Name + out.Message = in.Message + if err := Convert_v1beta1_RaftStatus_To_v1alpha1_RaftStatus(&in.RaftStatus, &out.RaftStatus, s); err != nil { + return err + } + out.Conditions = *(*[]v1alpha1.NodeCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1beta1_NodeStatus_To_v1alpha1_NodeStatus is an autogenerated conversion function. +func Convert_v1beta1_NodeStatus_To_v1alpha1_NodeStatus(in *NodeStatus, out *v1alpha1.NodeStatus, s conversion.Scope) error { + return autoConvert_v1beta1_NodeStatus_To_v1alpha1_NodeStatus(in, out, s) +} + +func autoConvert_v1alpha1_NodeStatus_To_v1beta1_NodeStatus(in *v1alpha1.NodeStatus, out *NodeStatus, s conversion.Scope) error { + out.Name = in.Name + out.Message = in.Message + if err := Convert_v1alpha1_RaftStatus_To_v1beta1_RaftStatus(&in.RaftStatus, &out.RaftStatus, s); err != nil { + return err + } + out.Conditions = *(*[]NodeCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1alpha1_NodeStatus_To_v1beta1_NodeStatus is an autogenerated conversion function. +func Convert_v1alpha1_NodeStatus_To_v1beta1_NodeStatus(in *v1alpha1.NodeStatus, out *NodeStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_NodeStatus_To_v1beta1_NodeStatus(in, out, s) +} + +func autoConvert_v1beta1_RaftStatus_To_v1alpha1_RaftStatus(in *RaftStatus, out *v1alpha1.RaftStatus, s conversion.Scope) error { + out.Role = in.Role + out.Leader = in.Leader + out.Nodes = *(*[]string)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1beta1_RaftStatus_To_v1alpha1_RaftStatus is an autogenerated conversion function. 
+func Convert_v1beta1_RaftStatus_To_v1alpha1_RaftStatus(in *RaftStatus, out *v1alpha1.RaftStatus, s conversion.Scope) error { + return autoConvert_v1beta1_RaftStatus_To_v1alpha1_RaftStatus(in, out, s) +} + +func autoConvert_v1alpha1_RaftStatus_To_v1beta1_RaftStatus(in *v1alpha1.RaftStatus, out *RaftStatus, s conversion.Scope) error { + out.Role = in.Role + out.Leader = in.Leader + out.Nodes = *(*[]string)(unsafe.Pointer(&in.Nodes)) + return nil +} + +// Convert_v1alpha1_RaftStatus_To_v1beta1_RaftStatus is an autogenerated conversion function. +func Convert_v1alpha1_RaftStatus_To_v1beta1_RaftStatus(in *v1alpha1.RaftStatus, out *RaftStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_RaftStatus_To_v1beta1_RaftStatus(in, out, s) +} + +func autoConvert_v1beta1_XenonOpts_To_v1alpha1_XenonOpts(in *XenonOpts, out *v1alpha1.XenonOpts, s conversion.Scope) error { + out.Image = in.Image + out.AdmitDefeatHearbeatCount = (*int32)(unsafe.Pointer(in.AdmitDefeatHearbeatCount)) + out.ElectionTimeout = (*int32)(unsafe.Pointer(in.ElectionTimeout)) + out.EnableAutoRebuild = in.EnableAutoRebuild + out.Resources = in.Resources + return nil +} + +// Convert_v1beta1_XenonOpts_To_v1alpha1_XenonOpts is an autogenerated conversion function. +func Convert_v1beta1_XenonOpts_To_v1alpha1_XenonOpts(in *XenonOpts, out *v1alpha1.XenonOpts, s conversion.Scope) error { + return autoConvert_v1beta1_XenonOpts_To_v1alpha1_XenonOpts(in, out, s) +} + +func autoConvert_v1alpha1_XenonOpts_To_v1beta1_XenonOpts(in *v1alpha1.XenonOpts, out *XenonOpts, s conversion.Scope) error { + out.Image = in.Image + out.AdmitDefeatHearbeatCount = (*int32)(unsafe.Pointer(in.AdmitDefeatHearbeatCount)) + out.ElectionTimeout = (*int32)(unsafe.Pointer(in.ElectionTimeout)) + out.EnableAutoRebuild = in.EnableAutoRebuild + out.Resources = in.Resources + return nil +} + +// Convert_v1alpha1_XenonOpts_To_v1beta1_XenonOpts is an autogenerated conversion function. 
+func Convert_v1alpha1_XenonOpts_To_v1beta1_XenonOpts(in *v1alpha1.XenonOpts, out *XenonOpts, s conversion.Scope) error { + return autoConvert_v1alpha1_XenonOpts_To_v1beta1_XenonOpts(in, out, s) +} diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index b8a5c8ce..d79e19b3 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -53,22 +53,6 @@ func (in *Backup) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupCondition) DeepCopyInto(out *BackupCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupCondition. -func (in *BackupCondition) DeepCopy() *BackupCondition { - if in == nil { - return nil - } - out := new(BackupCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupList) DeepCopyInto(out *BackupList) { *out = *in @@ -101,6 +85,31 @@ func (in *BackupList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupOps) DeepCopyInto(out *BackupOps) { + *out = *in + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(S3) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFS) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupOps. +func (in *BackupOps) DeepCopy() *BackupOps { + if in == nil { + return nil + } + out := new(BackupOps) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *BackupOpts) DeepCopyInto(out *BackupOpts) { *out = *in @@ -118,15 +127,46 @@ func (in *BackupOpts) DeepCopy() *BackupOpts { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { +func (in *BackupSchedule) DeepCopyInto(out *BackupSchedule) { *out = *in - if in.HistoryLimit != nil { - in, out := &in.HistoryLimit, &out.HistoryLimit + if in.BackupRetention != nil { + in, out := &in.BackupRetention, &out.BackupRetention + *out = new(int32) + **out = **in + } + if in.BackupJobHistoryLimit != nil { + in, out := &in.BackupJobHistoryLimit, &out.BackupJobHistoryLimit *out = new(int32) **out = **in } } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSchedule. +func (in *BackupSchedule) DeepCopy() *BackupSchedule { + if in == nil { + return nil + } + out := new(BackupSchedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + if in.Manual != nil { + in, out := &in.Manual, &out.Manual + *out = new(ManualBackup) + (*in).DeepCopyInto(*out) + } + if in.BackupSchedule != nil { + in, out := &in.BackupSchedule, &out.BackupSchedule + *out = new(BackupSchedule) + (*in).DeepCopyInto(*out) + } + in.BackupOpts.DeepCopyInto(&out.BackupOpts) +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. func (in *BackupSpec) DeepCopy() *BackupSpec { if in == nil { @@ -140,9 +180,22 @@ func (in *BackupSpec) DeepCopy() *BackupSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]BackupCondition, len(*in)) + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + if in.ManualBackup != nil { + in, out := &in.ManualBackup, &out.ManualBackup + *out = new(ManualBackupStatus) + (*in).DeepCopyInto(*out) + } + if in.ScheduledBackups != nil { + in, out := &in.ScheduledBackups, &out.ScheduledBackups + *out = make([]ScheduledBackupStatus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -178,11 +231,8 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DataSource) DeepCopyInto(out *DataSource) { *out = *in - if in.Remote != nil { - in, out := &in.Remote, &out.Remote - *out = new(RemoteDataSource) - (*in).DeepCopyInto(*out) - } + in.Remote.DeepCopyInto(&out.Remote) + out.S3Backup = in.S3Backup } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource. @@ -195,21 +245,6 @@ func (in *DataSource) DeepCopy() *DataSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DatabaseInitSQL) DeepCopyInto(out *DatabaseInitSQL) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInitSQL. -func (in *DatabaseInitSQL) DeepCopy() *DatabaseInitSQL { - if in == nil { - return nil - } - out := new(DatabaseInitSQL) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExporterSpec) DeepCopyInto(out *ExporterSpec) { *out = *in @@ -234,6 +269,7 @@ func (in *ExporterSpec) DeepCopy() *ExporterSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LogOpts) DeepCopyInto(out *LogOpts) { *out = *in + in.Resources.DeepCopyInto(&out.Resources) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogOpts. @@ -247,13 +283,52 @@ func (in *LogOpts) DeepCopy() *LogOpts { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { +func (in *ManualBackup) DeepCopyInto(out *ManualBackup) { *out = *in - if in.Exporter != nil { - in, out := &in.Exporter, &out.Exporter - *out = new(ExporterSpec) - (*in).DeepCopyInto(*out) + if in.BackupRetention != nil { + in, out := &in.BackupRetention, &out.BackupRetention + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManualBackup. +func (in *ManualBackup) DeepCopy() *ManualBackup { + if in == nil { + return nil } + out := new(ManualBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManualBackupStatus) DeepCopyInto(out *ManualBackupStatus) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManualBackupStatus. 
+func (in *ManualBackupStatus) DeepCopy() *ManualBackupStatus { + if in == nil { + return nil + } + out := new(ManualBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { + *out = *in + in.Exporter.DeepCopyInto(&out.Exporter) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. @@ -266,6 +341,35 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MySQLConfigs) DeepCopyInto(out *MySQLConfigs) { + *out = *in + if in.MysqlConfig != nil { + in, out := &in.MysqlConfig, &out.MysqlConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MySQLConfigs. +func (in *MySQLConfigs) DeepCopy() *MySQLConfigs { + if in == nil { + return nil + } + out := new(MySQLConfigs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MySQLStandbySpec) DeepCopyInto(out *MySQLStandbySpec) { *out = *in @@ -353,11 +457,7 @@ func (in *MysqlClusterSpec) DeepCopyInto(out *MysqlClusterSpec) { *out = new(int32) **out = **in } - if in.MySQLConfig != nil { - in, out := &in.MySQLConfig, &out.MySQLConfig - *out = new(string) - **out = **in - } + in.MySQLConfig.DeepCopyInto(&out.MySQLConfig) in.Resources.DeepCopyInto(&out.Resources) if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret @@ -365,11 +465,6 @@ func (in *MysqlClusterSpec) DeepCopyInto(out *MysqlClusterSpec) { (*in).DeepCopyInto(*out) } in.Storage.DeepCopyInto(&out.Storage) - if in.DatabaseInitSQL != nil { - in, out := &in.DatabaseInitSQL, &out.DatabaseInitSQL - *out = new(DatabaseInitSQL) - **out = **in - } in.Xenon.DeepCopyInto(&out.Xenon) in.Backup.DeepCopyInto(&out.Backup) in.Monitoring.DeepCopyInto(&out.Monitoring) @@ -385,22 +480,13 @@ func (in *MysqlClusterSpec) DeepCopyInto(out *MysqlClusterSpec) { *out = new(v1.Affinity) (*in).DeepCopyInto(*out) } - if in.PriorityClassName != nil { - in, out := &in.PriorityClassName, &out.PriorityClassName - *out = new(string) - **out = **in - } - if in.DataSource != nil { - in, out := &in.DataSource, &out.DataSource - *out = new(DataSource) - (*in).DeepCopyInto(*out) - } + in.DataSource.DeepCopyInto(&out.DataSource) if in.Standby != nil { in, out := &in.Standby, &out.Standby *out = new(MySQLStandbySpec) (*in).DeepCopyInto(*out) } - out.Log = in.Log + in.Log.DeepCopyInto(&out.Log) if in.Service != nil { in, out := &in.Service, &out.Service *out = new(ServiceSpec) @@ -447,6 +533,22 @@ func (in *MysqlClusterStatus) DeepCopy() *MysqlClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFS) DeepCopyInto(out *NFS) { + *out = *in + out.Volume = in.Volume +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFS. 
+func (in *NFS) DeepCopy() *NFS { + if in == nil { + return nil + } + out := new(NFS) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { *out = *in @@ -526,6 +628,59 @@ func (in *RemoteDataSource) DeepCopy() *RemoteDataSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3) DeepCopyInto(out *S3) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3. +func (in *S3) DeepCopy() *S3 { + if in == nil { + return nil + } + out := new(S3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3BackupDataSource) DeepCopyInto(out *S3BackupDataSource) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3BackupDataSource. +func (in *S3BackupDataSource) DeepCopy() *S3BackupDataSource { + if in == nil { + return nil + } + out := new(S3BackupDataSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduledBackupStatus) DeepCopyInto(out *ScheduledBackupStatus) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledBackupStatus. 
+func (in *ScheduledBackupStatus) DeepCopy() *ScheduledBackupStatus { + if in == nil { + return nil + } + out := new(ScheduledBackupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = *in diff --git a/backup/cronbackup.go b/backup/cronbackup.go index 9696c8a2..e50ca95d 100644 --- a/backup/cronbackup.go +++ b/backup/cronbackup.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package backup import ( diff --git a/charts/mysql-operator/Chart.yaml b/charts/mysql-operator/Chart.yaml index ae8d6ec6..79517fe1 100644 --- a/charts/mysql-operator/Chart.yaml +++ b/charts/mysql-operator/Chart.yaml @@ -15,14 +15,14 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: v2.3.0 +version: v3.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v2.3.0" +appVersion: "v3.0.0" dependencies: - name: "mysqlcluster" - version: "v2.3.0" + version: "v3.0.0" \ No newline at end of file diff --git a/charts/mysql-operator/charts/mysql-cluster/Chart.yaml b/charts/mysql-operator/charts/mysql-cluster/Chart.yaml index ab07dbb3..29612e5e 100644 --- a/charts/mysql-operator/charts/mysql-cluster/Chart.yaml +++ b/charts/mysql-operator/charts/mysql-cluster/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 2.3.0 +version: 3.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v2.3.0" +appVersion: "v3.0.0" diff --git a/charts/mysql-operator/charts/mysql-cluster/values.yaml b/charts/mysql-operator/charts/mysql-cluster/values.yaml index cc35b6f7..f7079a1b 100644 --- a/charts/mysql-operator/charts/mysql-cluster/values.yaml +++ b/charts/mysql-operator/charts/mysql-cluster/values.yaml @@ -6,7 +6,7 @@ name: "sample" namespace: "default" mysqlVersion: "8.0" -version: v2.3.0 +version: v3.0.0 tls: enable: false diff --git a/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml b/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml index 3abb51f7..893aa821 100644 --- a/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml +++ b/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml @@ -138,17 +138,29 @@ spec: jsonPath: .status.backupName name: BackupName type: string - - description: The Backup Date time - jsonPath: .status.backupDate - name: BackupDate + - description: The Backup Start time + jsonPath: .status.startTime + name: StartTime + type: string + - description: The Backup CompletionTime time + jsonPath: .status.completionTime + name: completionTime type: string - description: The Backup Type jsonPath: .status.backupType name: Type type: string - - description: Whether the backup Success? - jsonPath: .status.conditions[?(@.type=="Complete")].status - name: Success + - description: The Backup Initiator + jsonPath: .status.type + name: Initiator + type: string + - description: The Backup State + jsonPath: .status.state + name: State + type: string + - description: The Backup State + jsonPath: .status.backupSize + name: Size type: string name: v1beta1 schema: @@ -170,80 +182,186 @@ spec: spec: description: BackupSpec defines the desired state of Backup properties: + backupops: + description: Backup Storage + properties: + host: + description: BackupHost + type: string + nfs: + properties: + volume: + description: 'Defines a Volume for backup MySQL data. 
More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + properties: + path: + description: 'Path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export + to be mounted with read-only permissions. Defaults to + false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of + the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + type: object + s3: + properties: + secretName: + description: S3 Bucket + type: string + type: object + type: object clusterName: - description: ClusterName represents the cluster name to backup - type: string - historyLimit: - default: 3 - description: History Limit of job - format: int32 - type: integer - hostfrom: - description: HostFrom represents the host for which to take backup - If is empty, is use leader HostFrom + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file + ClusterName is the name of the cluster to be backed up.' type: string - image: - default: radondb/mysql57-sidecar:v2.3.0 - description: To specify the image that will be used for sidecar container. - type: string - nfsServerAddress: - description: Represents the ip address of the nfs server. 
+ manual: + description: Defines details for manual backup Jobs + properties: + backupRetention: + default: 7 + description: Backup Retention + format: int32 + type: integer + type: + type: string + type: object + method: + description: BackupMethod represents the type of backup type: string - required: - - clusterName + schedule: + description: Backup Schedule + properties: + backupRetention: + description: Backup Retention + format: int32 + type: integer + cronExpression: + description: Cron expression for backup schedule + type: string + jobhistoryLimit: + description: History Limit of job + format: int32 + type: integer + type: + type: string + type: object type: object status: - description: BackupStatus defines the observed state of Backup properties: - backupDate: - description: Get the backup Date - type: string backupName: - description: Get the backup path. + type: string + backupSize: type: string backupType: - description: Get the backup Type type: string - completed: - default: false - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' - type: boolean - conditions: - description: Conditions represents the backup resource conditions - list. + completionTime: + format: date-time + type: string + manual: + properties: + active: + description: The number of actively running manual backup Pods. + format: int32 + type: integer + backupName: + type: string + backupSize: + description: Get the backup Size + type: string + backupType: + description: Get the backup Type + type: string + completionTime: + description: Get the backup Type + format: date-time + type: string + failed: + format: int32 + type: integer + finished: + description: Specifies whether or not the Job is finished executing + (does not indicate success or failure). 
+ type: boolean + reason: + type: string + startTime: + description: Get the backup Date + format: date-time + type: string + state: + description: Get current backup status + type: string + succeeded: + description: Conditions represents the backup resource conditions + list. + format: int32 + type: integer + required: + - finished + - reason + type: object + scheduled: items: - description: BackupCondition defines condition struct for backup - resource properties: - lastTransitionTime: - description: LastTransitionTime + backupName: + description: Get the backup path. + type: string + backupSize: + description: Get the backup Size + type: string + backupType: + description: Get the backup Type + type: string + completionTime: + description: Get the backup Type format: date-time type: string - message: - description: Message + cronJobName: + description: The name of the associated scheduled backup CronJob type: string + failed: + format: int32 + type: integer + finished: + description: Specifies whether or not the Job is finished executing + (does not indicate success or failure). + type: boolean reason: - description: Reason type: string - status: - description: Status of the condition, one of (\"True\", \"False\", - \"Unknown\") + startTime: + description: Get the backup Date + format: date-time type: string - type: - description: type of cluster condition, values in (\"Ready\") + state: + description: Get current backup status type: string + succeeded: + description: Conditions represents the backup resource conditions + list. 
+ format: int32 + type: integer required: - - lastTransitionTime - - message + - finished - reason - - status - - type type: object type: array - required: - - completed + startTime: + format: date-time + type: string + state: + type: string + type: + type: string type: object type: object served: true diff --git a/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml b/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml index 6bcca485..6d5bfeaa 100644 --- a/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml +++ b/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml @@ -1523,98 +1523,6 @@ spec: spec: description: MysqlClusterSpec defines the desired state of MysqlCluster properties: - MonitoringSpec: - description: Monitoring is the options of metrics container. - properties: - exporter: - properties: - customTLSSecret: - description: Projected secret containing custom TLS certificates - to encrypt output from the exporter web server - properties: - items: - description: If unspecified, each key-value pair in the - Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and - content is the value. If specified, the listed keys - will be projected into the specified paths, and unlisted - keys will not be present. If a key is specified which - is not present in the Secret, the volume setup will - error unless it is marked optional. Paths must be relative - and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON - requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. 
- This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map - the key to. May not be an absolute path. May not - contain the path element '..'. May not start with - the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - type: object - image: - default: prom/mysqld-exporter:v0.12.1 - description: To specify the image that will be used for metrics - container. - type: string - resources: - description: 'Changing this value causes MySQL and the exporter - to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - type: object affinity: description: 'Scheduling constraints of MySQL pod. Changing this value causes MySQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' @@ -2525,6 +2433,16 @@ spec: dataSource: description: Specifies a data source for bootstrapping the MySQL cluster. properties: + S3backup: + description: Bootstraping from backup + properties: + name: + description: Backup name + type: string + secretName: + description: Secret name + type: string + type: object remote: description: Bootstraping from remote data source properties: @@ -2585,27 +2503,15 @@ spec: type: object type: object type: object - databaseInitSQL: - description: DatabaseInitSQL defines a ConfigMap containing custom - SQL that will be run after the cluster is initialized. This ConfigMap - must be in the same namespace as the cluster. - properties: - key: - description: Key is the ConfigMap data key that points to a SQL - string - type: string - name: - description: Name is the name of a ConfigMap - type: string - required: - - key - - name - type: object enableAutoRebuild: default: false description: If true, when the data is inconsistent, Xenon will automatically rebuild the invalid node. type: boolean + image: + default: percona/percona-server:5.7.34 + description: Specifies mysql image to use. + type: string imagePullPolicy: description: 'ImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' @@ -2616,15 +2522,172 @@ spec: type: string logOpts: description: LogOpts is the options of log settings. + properties: + auditLogTail: + default: false + description: AuditLogTail represents if tail the mysql audit log. 
+ type: boolean + image: + default: busybox:1.32 + description: To specify the image that will be used for log container. + The busybox image. + type: string + resources: + description: Log container resources of a MySQL container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + slowLogTail: + default: false + description: SlowLogTail represents if tail the mysql slow log. + type: boolean type: object + maxLagTime: + default: 30 + description: MaxLagSeconds configures the readiness probe of mysqld + container if the replication lag is greater than MaxLagSeconds, + the mysqld container will not be not healthy. + minimum: 0 + type: integer minAvailable: default: 50% description: The number of pods from that set that must still be available after the eviction, even in the absence of the evicted pod type: string + monitoringSpec: + description: Monitoring is the options of metrics container. 
+ properties: + exporter: + properties: + customTLSSecret: + description: Projected secret containing custom TLS certificates + to encrypt output from the exporter web server + properties: + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and + content is the value. If specified, the listed keys + will be projected into the specified paths, and unlisted + keys will not be present. If a key is specified which + is not present in the Secret, the volume setup will + error unless it is marked optional. Paths must be relative + and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON + requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + type: object + enabled: + default: true + description: enabled is used to enable/disable the exporter. 
+ type: boolean + image: + default: prom/mysqld-exporter:v0.12.1 + description: To specify the image that will be used for metrics + container. + type: string + resources: + description: 'Changing this value causes MySQL and the exporter + to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object mysqlConfig: description: MySQLConfig `ConfigMap` name of MySQL config. - type: string + properties: + configMapName: + description: Name of the `ConfigMap` containing MySQL config. + type: string + myCnf: + additionalProperties: + type: string + description: A map[string]string that will be passed to my.cnf + file. The key/value pairs is persisted in the configmap. + type: object + pluginCnf: + additionalProperties: + type: string + type: object + type: object mysqlVersion: default: "5.7" description: 'Represents the MySQL version that will be run. 
The available diff --git a/charts/mysql-operator/templates/deployment.yaml b/charts/mysql-operator/templates/deployment.yaml index e71c910c..b3a7c479 100644 --- a/charts/mysql-operator/templates/deployment.yaml +++ b/charts/mysql-operator/templates/deployment.yaml @@ -35,7 +35,7 @@ spec: {{- if .Values.rbacProxy.create }} - name: kube-rbac-proxy {{- $imagerepo:=(split "/" .Values.rbacProxy.image)._0 }} - {{- $imagetag:= (split "/" .Values.rbacProxy.image)._1 }} + {{- $imagetag:= (splitList "/" .Values.rbacProxy.image )|last }} {{- if .Values.imageNamespaceOverride}} {{- if .Values.imagePrefix}} image: {{ printf "%s/%s/%s" .Values.imagePrefix .Values.imageNamespaceOverride $imagetag|quote }} @@ -82,7 +82,7 @@ spec: - --metrics-bind-address=127.0.0.1:8080 - --leader-elect {{- $imagerepo:=(split "/" .Values.manager.image)._0 }} - {{- $imagetag:= printf "%s:%s" (split "/" .Values.manager.image)._1 .Values.manager.tag }} + {{- $imagetag:= printf "%s:%s" (splitList "/" .Values.manager.image|last) .Values.manager.tag }} {{- if .Values.imageNamespaceOverride}} {{- if .Values.imagePrefix}} image: {{ printf "%s/%s/%s" .Values.imagePrefix .Values.imageNamespaceOverride $imagetag|quote }} diff --git a/charts/mysql-operator/values.yaml b/charts/mysql-operator/values.yaml index df31e845..a466ae89 100644 --- a/charts/mysql-operator/values.yaml +++ b/charts/mysql-operator/values.yaml @@ -7,7 +7,7 @@ installCRDS: true mysqlcluster: install: false - version: v2.3.0 + version: v3.0.0 ## Specify an imagePullPolicy (Required) ## It's recommended to change this to 'Always' if the image tag is 'latest' ## ref: http://kubernetes.io/docs/user-guide/images/#updating-images @@ -25,7 +25,7 @@ tolerationSeconds: 30 manager: image: radondb/mysql-operator - tag: v2.3.0 + tag: v3.0.0 enableWebhooks: true resources: {} # We usually recommend not to specify default resources and to leave this as a conscious @@ -98,7 +98,7 @@ nfsBackup: webhook: certManager: # If true, make sure that cert-manager 
has been installed. - enabled: false + enabled: true # If empty and disable certManager, Helm will auto-generate these fields. caBundlePEM: | diff --git a/cmd/manager/main.go b/cmd/manager/main.go index a5050732..e359317d 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -42,6 +42,7 @@ import ( mysqlv1alpha1 "github.com/radondb/radondb-mysql-kubernetes/api/v1alpha1" mysqlv1beta1 "github.com/radondb/radondb-mysql-kubernetes/api/v1beta1" "github.com/radondb/radondb-mysql-kubernetes/controllers" + "github.com/radondb/radondb-mysql-kubernetes/controllers/backup" "github.com/radondb/radondb-mysql-kubernetes/internal" //+kubebuilder:scaffold:imports ) @@ -75,7 +76,7 @@ func main() { } logfmtEncoder := zaplogfmt.NewEncoder(configLog) opts := zap.Options{ - Development: true, + Development: false, } opts.BindFlags(flag.CommandLine) @@ -116,12 +117,21 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Status") os.Exit(1) } - if err = (&controllers.BackupReconciler{ + // if err = (&controllers.BackupReconciler{ + // Client: mgr.GetClient(), + // Scheme: mgr.GetScheme(), + // Recorder: mgr.GetEventRecorderFor("controller.Backup"), + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "Backup") + // os.Exit(1) + // } + if err = (&backup.BackupReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("controller.Backup"), + Recorder: mgr.GetEventRecorderFor("controller.Backups"), + Owner: "backup", }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Backup") + setupLog.Error(err, "unable to create v1beta1 controller", "controller", "Backup") os.Exit(1) } if err = (&controllers.MysqlUserReconciler{ diff --git a/cmd/mysql/main.go b/cmd/mysql/main.go index dc4d9a70..50d34621 100644 --- a/cmd/mysql/main.go +++ b/cmd/mysql/main.go @@ -1,3 +1,19 @@ 
+/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package main import ( @@ -19,7 +35,7 @@ import ( ) var ( - clientConfDir = "/etc/my.cnf.d/client.conf" + clientConfDir = "/etc/mysql/client.conf" connectionMaxIdleTime = 30 * time.Second connectionTimeout = 30 * time.Second raftStatusCmd = "xenoncli raft status" diff --git a/cmd/nfsbcp/api/backup.go b/cmd/nfsbcp/api/backup.go index 1a6cabad..47c0d130 100644 --- a/cmd/nfsbcp/api/backup.go +++ b/cmd/nfsbcp/api/backup.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package api import ( diff --git a/cmd/nfsbcp/api/control.go b/cmd/nfsbcp/api/control.go index d7cc2883..adff7391 100644 --- a/cmd/nfsbcp/api/control.go +++ b/cmd/nfsbcp/api/control.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package api import ( diff --git a/cmd/nfsbcp/api/nfs.go b/cmd/nfsbcp/api/nfs.go index 31e0a8b3..076991fe 100644 --- a/cmd/nfsbcp/api/nfs.go +++ b/cmd/nfsbcp/api/nfs.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package api import ( diff --git a/cmd/nfsbcp/api/storage.go b/cmd/nfsbcp/api/storage.go index 4b5dff36..122cb640 100644 --- a/cmd/nfsbcp/api/storage.go +++ b/cmd/nfsbcp/api/storage.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package api import ( diff --git a/cmd/nfsbcp/cmd/bcp.go b/cmd/nfsbcp/cmd/bcp.go index ea16e636..8c58f086 100644 --- a/cmd/nfsbcp/cmd/bcp.go +++ b/cmd/nfsbcp/cmd/bcp.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package cmd import ( diff --git a/cmd/nfsbcp/cmd/root.go b/cmd/nfsbcp/cmd/root.go index 3271e995..8ab36acd 100644 --- a/cmd/nfsbcp/cmd/root.go +++ b/cmd/nfsbcp/cmd/root.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package cmd import ( diff --git a/cmd/nfsbcp/main.go b/cmd/nfsbcp/main.go index 5339680d..92aba3c7 100644 --- a/cmd/nfsbcp/main.go +++ b/cmd/nfsbcp/main.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package main import "github.com/radondb/radondb-mysql-kubernetes/cmd/nfsbcp/cmd" diff --git a/cmd/sidecar/main.go b/cmd/sidecar/main.go index dbbf6d08..f432fbb1 100644 --- a/cmd/sidecar/main.go +++ b/cmd/sidecar/main.go @@ -21,6 +21,7 @@ import ( "os" "time" + "github.com/radondb/radondb-mysql-kubernetes/sidecar" "github.com/radondb/radondb-mysql-kubernetes/utils" "github.com/spf13/cobra" @@ -32,27 +33,25 @@ import ( ) const ( - // The name of the sidecar. - sidecarName = "sidecar" - // The short description of the sidecar. - sidecarShort = "A simple helper for mysql operator." + SidecarName = "sidecar" // The name of the sidecar. + SidecarShort = "A simple helper for mysql operator." // The short description of the sidecar. ) var ( log = logf.Log.WithName("sidecar") // A command for sidecar. 
cmd = &cobra.Command{ - Use: sidecarName, - Short: sidecarShort, + Use: SidecarName, + Short: SidecarShort, Run: func(cmd *cobra.Command, args []string) { log.Info("run the sidecar, see help section") - os.Exit(1) }, } ) -func main() { - configLog := uzap.NewProductionEncoderConfig() +func init() { + // setup logging + configLog := uzap.NewProductionEncoderConfig() configLog.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { encoder.AppendString(ts.UTC().Format(time.RFC3339Nano)) } @@ -60,11 +59,15 @@ func main() { // setup logging logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(os.Stdout), zap.Encoder(logfmtEncoder))) - stop := make(chan struct{}, 1) +} + +func main() { containerName := sidecar.GetContainerType() + stop := make(chan struct{}, 1) - if containerName == utils.ContainerBackupName { + switch containerName { + case utils.ContainerBackupName: backupCfg := sidecar.NewBackupConfig() httpCmd := &cobra.Command{ Use: "http", @@ -72,12 +75,12 @@ func main() { Run: func(cmd *cobra.Command, args []string) { if err := sidecar.RunHttpServer(backupCfg, stop); err != nil { log.Error(err, "run command failed") - os.Exit(1) } }, } cmd.AddCommand(httpCmd) - } else if containerName == utils.ContainerBackupJobName { + + case utils.ContainerBackupJobName: reqBackupCfg := sidecar.NewReqBackupConfig() reqBackupCmd := &cobra.Command{ Use: "request_a_backup", @@ -91,12 +94,12 @@ func main() { Run: func(cmd *cobra.Command, args []string) { if err := sidecar.RunRequestBackup(reqBackupCfg, args[0]); err != nil { log.Error(err, "run command failed") - os.Exit(1) } }, } cmd.AddCommand(reqBackupCmd) - } else { + + default: initCfg := sidecar.NewInitConfig() initCmd := sidecar.NewInitCommand(initCfg) cmd.AddCommand(initCmd) @@ -104,6 +107,5 @@ func main() { if err := cmd.Execute(); err != nil { log.Error(err, "failed to execute command", "cmd", cmd) - os.Exit(1) } } diff --git a/cmd/sidecar/sidecarTest.go b/cmd/sidecar/sidecarTest.go new file mode 
100644 index 00000000..8e4e55be --- /dev/null +++ b/cmd/sidecar/sidecarTest.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "testing" + + "github.com/radondb/radondb-mysql-kubernetes/sidecar" + "github.com/stretchr/testify/assert" +) + +func TestGetContainerName(t *testing.T) { + containerName := sidecar.GetContainerType() + assert.NotNil(t, containerName) +} + +// func TestBackupConfig(t *testing.T) { +// backupCfg := sidecar.NewBackupConfig() +// assert.NotNil(t, backupCfg) +// } + +func TestReqBackupConfig(t *testing.T) { + reqBackupCfg := sidecar.NewReqBackupConfig() + assert.NotNil(t, reqBackupCfg) +} + +func TestInitConfig(t *testing.T) { + initCfg := sidecar.NewInitConfig() + assert.NotNil(t, initCfg) +} + +// func TestRunHttpServer(t *testing.T) { +// backupCfg := sidecar.NewBackupConfig() +// stop := make(chan struct{}, 1) +// err := sidecar.RunHttpServer(backupCfg, stop) +// assert.Error(t, err) +// } + +func TestRunRequestBackup(t *testing.T) { + reqBackupCfg := sidecar.NewReqBackupConfig() + err := sidecar.RunRequestBackup(reqBackupCfg, "test") + assert.Error(t, err) +} + +func TestInitCommand(t *testing.T) { + initCfg := sidecar.NewInitConfig() + initCmd := sidecar.NewInitCommand(initCfg) + assert.NotNil(t, initCmd) +} diff --git a/cmd/xenon/main.go b/cmd/xenon/main.go index 4a6e17f0..0819ee6a 100644 --- a/cmd/xenon/main.go +++ b/cmd/xenon/main.go @@ -1,3 +1,19 @@ +/* +Copyright 
2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package main import ( diff --git a/config/crd/bases/mysql.radondb.com_backups.yaml b/config/crd/bases/mysql.radondb.com_backups.yaml index 3abb51f7..893aa821 100644 --- a/config/crd/bases/mysql.radondb.com_backups.yaml +++ b/config/crd/bases/mysql.radondb.com_backups.yaml @@ -138,17 +138,29 @@ spec: jsonPath: .status.backupName name: BackupName type: string - - description: The Backup Date time - jsonPath: .status.backupDate - name: BackupDate + - description: The Backup Start time + jsonPath: .status.startTime + name: StartTime + type: string + - description: The Backup Completion time + jsonPath: .status.completionTime + name: completionTime + type: string - description: The Backup Type jsonPath: .status.backupType name: Type type: string - - description: Whether the backup Success? 
- jsonPath: .status.conditions[?(@.type=="Complete")].status - name: Success + - description: The Backup Initiator + jsonPath: .status.type + name: Initiator + type: string + - description: The Backup State + jsonPath: .status.state + name: State + type: string + - description: The Backup Size + jsonPath: .status.backupSize + name: Size + type: string name: v1beta1 schema: @@ -170,80 +182,186 @@ spec: spec: description: BackupSpec defines the desired state of Backup properties: + backupops: + description: Backup Storage + properties: + host: + description: BackupHost + type: string + nfs: + properties: + volume: + description: 'Defines a Volume for backup MySQL data. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes' + properties: + path: + description: 'Path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export + to be mounted with read-only permissions. Defaults to + false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of + the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + type: object + s3: + properties: + secretName: + description: S3 Bucket + type: string + type: object + type: object clusterName: - description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file + ClusterName is the name of the cluster to be backed up.' 
type: string - image: - default: radondb/mysql57-sidecar:v2.3.0 - description: To specify the image that will be used for sidecar container. - type: string - nfsServerAddress: - description: Represents the ip address of the nfs server. + manual: + description: Defines details for manual backup Jobs + properties: + backupRetention: + default: 7 + description: Backup Retention + format: int32 + type: integer + type: + type: string + type: object + method: + description: BackupMethod represents the type of backup type: string - required: - - clusterName + schedule: + description: Backup Schedule + properties: + backupRetention: + description: Backup Retention + format: int32 + type: integer + cronExpression: + description: Cron expression for backup schedule + type: string + jobhistoryLimit: + description: History Limit of job + format: int32 + type: integer + type: + type: string + type: object type: object status: - description: BackupStatus defines the observed state of Backup properties: - backupDate: - description: Get the backup Date - type: string backupName: - description: Get the backup path. + type: string + backupSize: type: string backupType: - description: Get the backup Type type: string - completed: - default: false - description: 'INSERT ADDITIONAL STATUS FIELD - define observed state - of cluster Important: Run "make" to regenerate code after modifying - this file' - type: boolean - conditions: - description: Conditions represents the backup resource conditions - list. + completionTime: + format: date-time + type: string + manual: + properties: + active: + description: The number of actively running manual backup Pods. 
+ format: int32 + type: integer + backupName: + type: string + backupSize: + description: Get the backup Size + type: string + backupType: + description: Get the backup Type + type: string + completionTime: + description: Get the backup completion time + format: date-time + type: string + failed: + format: int32 + type: integer + finished: + description: Specifies whether or not the Job is finished executing + (does not indicate success or failure). + type: boolean + reason: + type: string + startTime: + description: Get the backup Date + format: date-time + type: string + state: + description: Get current backup status + type: string + succeeded: + description: The number of backup Pods that completed + successfully. + format: int32 + type: integer + required: + - finished + - reason + type: object + scheduled: items: - description: BackupCondition defines condition struct for backup - resource properties: - lastTransitionTime: - description: LastTransitionTime + backupName: + description: Get the backup path. + type: string + backupSize: + description: Get the backup Size + type: string + backupType: + description: Get the backup Type + type: string + completionTime: + description: Get the backup completion time format: date-time type: string - message: - description: Message + cronJobName: + description: The name of the associated scheduled backup CronJob type: string + failed: + format: int32 + type: integer + finished: + description: Specifies whether or not the Job is finished executing + (does not indicate success or failure). + type: boolean reason: - description: Reason type: string - status: - description: Status of the condition, one of (\"True\", \"False\", - \"Unknown\") + startTime: + description: Get the backup Date + format: date-time type: string - type: - description: type of cluster condition, values in (\"Ready\") + state: + description: Get current backup status + type: string + succeeded: + description: The number of backup Pods that completed + successfully. 
+ format: int32 + type: integer required: - - lastTransitionTime - - message + - finished - reason - - status - - type type: object type: array - required: - - completed + startTime: + format: date-time + type: string + state: + type: string + type: + type: string type: object type: object served: true diff --git a/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml b/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml index 6bcca485..6d5bfeaa 100644 --- a/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml +++ b/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml @@ -1523,98 +1523,6 @@ spec: spec: description: MysqlClusterSpec defines the desired state of MysqlCluster properties: - MonitoringSpec: - description: Monitoring is the options of metrics container. - properties: - exporter: - properties: - customTLSSecret: - description: Projected secret containing custom TLS certificates - to encrypt output from the exporter web server - properties: - items: - description: If unspecified, each key-value pair in the - Data field of the referenced Secret will be projected - into the volume as a file whose name is the key and - content is the value. If specified, the listed keys - will be projected into the specified paths, and unlisted - keys will not be present. If a key is specified which - is not present in the Secret, the volume setup will - error unless it is marked optional. Paths must be relative - and may not contain the '..' path or start with '..'. - items: - description: Maps a string key to a path within a volume. - properties: - key: - description: The key to project. - type: string - mode: - description: 'Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON - requires decimal values for mode bits. If not - specified, the volume defaultMode will be used. 
- This might be in conflict with other options that - affect the file mode, like fsGroup, and the result - can be other mode bits set.' - format: int32 - type: integer - path: - description: The relative path of the file to map - the key to. May not be an absolute path. May not - contain the path element '..'. May not start with - the string '..'. - type: string - required: - - key - - path - type: object - type: array - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the Secret or its key must - be defined - type: boolean - type: object - image: - default: prom/mysqld-exporter:v0.12.1 - description: To specify the image that will be used for metrics - container. - type: string - resources: - description: 'Changing this value causes MySQL and the exporter - to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute - resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of - compute resources required. If Requests is omitted for - a container, it defaults to Limits if that is explicitly - specified, otherwise to an implementation-defined value. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - type: object - type: object affinity: description: 'Scheduling constraints of MySQL pod. Changing this value causes MySQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node' @@ -2525,6 +2433,16 @@ spec: dataSource: description: Specifies a data source for bootstrapping the MySQL cluster. properties: + S3backup: + description: Bootstraping from backup + properties: + name: + description: Backup name + type: string + secretName: + description: Secret name + type: string + type: object remote: description: Bootstraping from remote data source properties: @@ -2585,27 +2503,15 @@ spec: type: object type: object type: object - databaseInitSQL: - description: DatabaseInitSQL defines a ConfigMap containing custom - SQL that will be run after the cluster is initialized. This ConfigMap - must be in the same namespace as the cluster. - properties: - key: - description: Key is the ConfigMap data key that points to a SQL - string - type: string - name: - description: Name is the name of a ConfigMap - type: string - required: - - key - - name - type: object enableAutoRebuild: default: false description: If true, when the data is inconsistent, Xenon will automatically rebuild the invalid node. type: boolean + image: + default: percona/percona-server:5.7.34 + description: Specifies mysql image to use. + type: string imagePullPolicy: description: 'ImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy' @@ -2616,15 +2522,172 @@ spec: type: string logOpts: description: LogOpts is the options of log settings. + properties: + auditLogTail: + default: false + description: AuditLogTail represents if tail the mysql audit log. 
+ type: boolean + image: + default: busybox:1.32 + description: To specify the image that will be used for log container. + The busybox image. + type: string + resources: + description: Log container resources of a MySQL container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + slowLogTail: + default: false + description: SlowLogTail represents if tail the mysql slow log. + type: boolean type: object + maxLagTime: + default: 30 + description: MaxLagSeconds configures the readiness probe of mysqld + container if the replication lag is greater than MaxLagSeconds, + the mysqld container will not be not healthy. + minimum: 0 + type: integer minAvailable: default: 50% description: The number of pods from that set that must still be available after the eviction, even in the absence of the evicted pod type: string + monitoringSpec: + description: Monitoring is the options of metrics container. 
+ properties: + exporter: + properties: + customTLSSecret: + description: Projected secret containing custom TLS certificates + to encrypt output from the exporter web server + properties: + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and + content is the value. If specified, the listed keys + will be projected into the specified paths, and unlisted + keys will not be present. If a key is specified which + is not present in the Secret, the volume setup will + error unless it is marked optional. Paths must be relative + and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON + requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + type: object + enabled: + default: true + description: enabled is used to enable/disable the exporter. 
+ type: boolean + image: + default: prom/mysqld-exporter:v0.12.1 + description: To specify the image that will be used for metrics + container. + type: string + resources: + description: 'Changing this value causes MySQL and the exporter + to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object mysqlConfig: description: MySQLConfig `ConfigMap` name of MySQL config. - type: string + properties: + configMapName: + description: Name of the `ConfigMap` containing MySQL config. + type: string + myCnf: + additionalProperties: + type: string + description: A map[string]string that will be passed to my.cnf + file. The key/value pairs is persisted in the configmap. + type: object + pluginCnf: + additionalProperties: + type: string + type: object + type: object mysqlVersion: default: "5.7" description: 'Represents the MySQL version that will be run. 
The available diff --git a/config/crd/patches/webhook_in_mysqlclusters.yaml b/config/crd/patches/webhook_in_mysqlclusters.yaml index 8008e7ba..cafad3d3 100644 --- a/config/crd/patches/webhook_in_mysqlclusters.yaml +++ b/config/crd/patches/webhook_in_mysqlclusters.yaml @@ -10,7 +10,7 @@ spec: clientConfig: service: namespace: system - name: webhook-service + name: radondb-mysql-webhook path: /convert conversionReviewVersions: - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index ce1396e5..81c5c3dd 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -64,11 +64,11 @@ vars: objref: kind: Service version: v1 - name: webhook-service + name: radondb-mysql-webhook fieldref: fieldpath: metadata.namespace - name: SERVICE_NAME objref: kind: Service version: v1 - name: webhook-service + name: radondb-mysql-webhook diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7d4b254b..1eea4289 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -18,6 +18,18 @@ rules: - patch - update - watch +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: diff --git a/config/samples/mysql_v1alpha1_backup.yaml b/config/samples/mysql_v1alpha1_backup.yaml index 4750919c..3469a434 100644 --- a/config/samples/mysql_v1alpha1_backup.yaml +++ b/config/samples/mysql_v1alpha1_backup.yaml @@ -4,7 +4,7 @@ metadata: name: backup-sample spec: # Add fields here - image: radondb/mysql57-sidecar:v2.3.0 + image: radondb/mysql57-sidecar:v3.0.0 # hostname if empty, use the leader as hostname hostName: sample-mysql-0 clusterName: sample diff --git a/config/samples/mysql_v1alpha1_mysqlcluster.yaml b/config/samples/mysql_v1alpha1_mysqlcluster.yaml index 2bbb310d..cea3247c 100644 --- a/config/samples/mysql_v1alpha1_mysqlcluster.yaml +++ b/config/samples/mysql_v1alpha1_mysqlcluster.yaml @@ -17,7 +17,7 @@ spec: # 
such as nfsServerAddress: "10.233.55.172" # nfsServerAddress: mysqlOpts: - image: acekingke/percona-server:5.7.34 + image: percona/percona-server:5.7.34 user: radondb_usr password: RadonDB@123 database: radondb @@ -35,7 +35,7 @@ spec: cpu: 500m memory: 1Gi xenonOpts: - image: radondb/xenon:v2.3.0 + image: radondb/xenon:v3.0.0 admitDefeatHearbeatCount: 5 electionTimeout: 10000 resources: @@ -57,7 +57,7 @@ spec: memory: 128Mi podPolicy: imagePullPolicy: IfNotPresent - sidecarImage: radondb/mysql57-sidecar:v2.3.0 + sidecarImage: radondb/mysql57-sidecar:v3.0.0 busyboxImage: busybox:1.32 slowLogTail: false auditLogTail: false diff --git a/config/samples/mysql_v1alpha1_mysqlcluster_backup_schedule_demo.yaml b/config/samples/mysql_v1alpha1_mysqlcluster_backup_schedule_demo.yaml index b4fe788a..f9e1844b 100644 --- a/config/samples/mysql_v1alpha1_mysqlcluster_backup_schedule_demo.yaml +++ b/config/samples/mysql_v1alpha1_mysqlcluster_backup_schedule_demo.yaml @@ -34,7 +34,7 @@ spec: memory: 1Gi xenonOpts: - image: radondb/xenon:v2.3.0 + image: radondb/xenon:v3.0.0 admitDefeatHearbeatCount: 5 electionTimeout: 10000 @@ -60,7 +60,7 @@ spec: podPolicy: imagePullPolicy: IfNotPresent - sidecarImage: radondb/mysql57-sidecar:v2.3.0 + sidecarImage: radondb/mysql57-sidecar:v3.0.0 busyboxImage: busybox:1.32 slowLogTail: false diff --git a/config/samples/mysql_v1alpha1_mysqlcluster_mysql8.yaml b/config/samples/mysql_v1alpha1_mysqlcluster_mysql8.yaml index 88003d6f..f2ab3db2 100644 --- a/config/samples/mysql_v1alpha1_mysqlcluster_mysql8.yaml +++ b/config/samples/mysql_v1alpha1_mysqlcluster_mysql8.yaml @@ -35,7 +35,7 @@ spec: memory: 1Gi xenonOpts: - image: radondb/xenon:v2.3.0 + image: radondb/xenon:v3.0.0 admitDefeatHearbeatCount: 5 electionTimeout: 10000 @@ -61,7 +61,7 @@ spec: podPolicy: imagePullPolicy: IfNotPresent - sidecarImage: radondb/mysql80-sidecar:v2.3.0 + sidecarImage: radondb/mysql80-sidecar:v3.0.0 busyboxImage: busybox:1.32 slowLogTail: false diff --git 
a/config/samples/mysql_v1alpha1_mysqlcluster_podAntiAffinity.yaml b/config/samples/mysql_v1alpha1_mysqlcluster_podAntiAffinity.yaml index eaa98507..dc10c39c 100644 --- a/config/samples/mysql_v1alpha1_mysqlcluster_podAntiAffinity.yaml +++ b/config/samples/mysql_v1alpha1_mysqlcluster_podAntiAffinity.yaml @@ -34,7 +34,7 @@ spec: memory: 1Gi xenonOpts: - image: radondb/xenon:v2.3.0 + image: radondb/xenon:v3.0.0 admitDefeatHearbeatCount: 5 electionTimeout: 10000 @@ -60,7 +60,7 @@ spec: podPolicy: imagePullPolicy: IfNotPresent - sidecarImage: radondb/mysql57-sidecar:v2.3.0 + sidecarImage: radondb/mysql57-sidecar:v3.0.0 busyboxImage: busybox:1.32 slowLogTail: false diff --git a/config/samples/mysql_v1beta1_backup.yaml b/config/samples/mysql_v1beta1_backup.yaml index 6beed697..c72d3add 100644 --- a/config/samples/mysql_v1beta1_backup.yaml +++ b/config/samples/mysql_v1beta1_backup.yaml @@ -4,7 +4,7 @@ metadata: name: backup-sample spec: # Add fields here - image: radondb/mysql57-sidecar:v2.3.0 + image: radondb/mysql57-sidecar:v3.0.0 # hostfrom if empty, use the leader as hostfrom hostfrom: sample-mysql-0 clusterName: sample diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 7117f0ef..f36904f9 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -10,7 +10,7 @@ webhooks: - v1 clientConfig: service: - name: webhook-service + name: radondb-mysql-webhook namespace: system path: /validate-mysql-radondb-com-v1alpha1-mysqlcluster failurePolicy: Fail diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml index 3f638bd9..d36ad6a9 100644 --- a/config/webhook/service.yaml +++ b/config/webhook/service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: webhook-service + name: radondb-mysql-webhook namespace: system spec: ports: diff --git a/controllers/backup/backup_controller.go b/controllers/backup/backup_controller.go new file mode 100644 index 00000000..bd68667e --- /dev/null +++ 
b/controllers/backup/backup_controller.go @@ -0,0 +1,537 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "context" + "fmt" + "reflect" + + "github.com/pkg/errors" + v1beta1 "github.com/radondb/radondb-mysql-kubernetes/api/v1beta1" + "github.com/radondb/radondb-mysql-kubernetes/utils" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// BackupReconciler reconciles a Backup object. 
+type BackupReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + Owner client.FieldOwner +} +type BackupResource struct { + cronjobs []*batchv1.CronJob + jobs []*batchv1.Job + mysqlCluster *v1beta1.MysqlCluster +} + +//+kubebuilder:rbac:groups=mysql.radondb.com,resources=backups,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=mysql.radondb.com,resources=backups/status,verbs=get;update;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the Backup object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.2/pkg/reconcile +func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := log.FromContext(ctx).WithName("controllers").WithName("backup") + + result := reconcile.Result{} + backup := &v1beta1.Backup{} + + if err := r.Client.Get(ctx, req.NamespacedName, backup); err != nil { + // NotFound cannot be fixed by requeuing so ignore it. During background + // deletion, we receive delete events from backup's dependents after + // backup is deleted. 
+ if err = client.IgnoreNotFound(err); err != nil { + log.Error(err, "unable to fetch Backup") + } + return result, err + } + //set default value + + // if backup.Spec.ClusterName is empty, return error + if backup.Spec.ClusterName == "" { + return result, errors.New("backup.Spec.ClusterName is empty") + } + // get MySQLCluster object + cluster := &v1beta1.MysqlCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: backup.Spec.ClusterName, + Namespace: backup.Namespace, + }, + } + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(cluster), cluster); err != nil { + // NotFound cannot be fixed by requeuing so ignore it. During background + // deletion, we receive delete events from backup's dependents after + // backup is deleted. + if err = client.IgnoreNotFound(err); err != nil { + log.Error(err, "unable to fetch MysqlCluster") + // A real (non-NotFound) error means the cluster spec is unknown; + // requeue instead of reconciling against an empty cluster object. + return result, err + } + } + + var err error + // Keep a copy of backup prior to any manipulations. + before := backup.DeepCopy() + + patchClusterStatus := func() (reconcile.Result, error) { + if !equality.Semantic.DeepEqual(before.Status, backup.Status) { + if err := errors.WithStack(r.Client.Status().Patch( + ctx, backup, client.MergeFrom(before), r.Owner)); err != nil { + log.Error(err, "patching cluster status") + return result, err + } + log.V(1).Info("patched cluster status") + } + return result, err + } + + // create the Result that will be updated while reconciling any/all backup resources + + backupResources, err := r.getBackupResources(ctx, backup) + if err != nil { + // exit early if can't get and clean existing resources as needed to reconcile + return result, errors.WithStack(err) + } + backupResources.mysqlCluster = cluster + if err := r.reconcileManualBackup(ctx, backup, backupResources.jobs, backupResources.mysqlCluster); err != nil { + log.Error(err, "unable to reconcile manual backup") + } + if err := r.reconcileCronBackup(ctx, backup, backupResources.cronjobs, backupResources.jobs, cluster); err != nil { + log.Error(err, "unable to 
reconcile cron backup") + } + return patchClusterStatus() +} + +// SetupWithManager sets up the controller with the Manager. +func (r *BackupReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1beta1.Backup{}). + Owns(&batchv1.Job{}). + Owns(&batchv1.CronJob{}). + Complete(r) +} + +func (r *BackupReconciler) getBackupResources(ctx context.Context, + backup *v1beta1.Backup) (*BackupResource, error) { + // get the cluster + backupResource := &BackupResource{} + gvks := []schema.GroupVersionKind{{ + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, + Kind: "JobList", + }, { + Group: batchv1.SchemeGroupVersion.Group, + Version: batchv1.SchemeGroupVersion.Version, + Kind: "CronJobList", + }, + } + selector := BackupSelector(backup.Spec.ClusterName) + for _, gvk := range gvks { + uList := &unstructured.UnstructuredList{} + uList.SetGroupVersionKind(gvk) + if err := r.Client.List(ctx, uList, + client.InNamespace(backup.GetNamespace()), + client.MatchingLabelsSelector{Selector: selector}); err != nil { + return nil, errors.WithStack(err) + } + if len(uList.Items) == 0 { + continue + } + if err := unstructuredToBackupResources(gvk.Kind, backupResource, + uList); err != nil { + return nil, errors.WithStack(err) + } + + } + return backupResource, nil +} + +func unstructuredToBackupResources(kind string, backupResource *BackupResource, + uList *unstructured.UnstructuredList) error { + for _, u := range uList.Items { + switch kind { + case "JobList": + job := &batchv1.Job{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, job); err != nil { + return errors.WithStack(err) + } + backupResource.jobs = append(backupResource.jobs, job) + case "CronJobList": + cronjob := &batchv1.CronJob{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, cronjob); err != nil { + return errors.WithStack(err) + } + backupResource.cronjobs = 
append(backupResource.cronjobs, cronjob) + } + } + return nil +} + +func (r *BackupReconciler) reconcileManualBackup(ctx context.Context, + backup *v1beta1.Backup, manualBackupJobs []*batchv1.Job, cluster *v1beta1.MysqlCluster) error { + manualStatus := backup.Status.ManualBackup + var currentBackupJob *batchv1.Job + + if backup.Spec.BackupSchedule != nil { + // if the backup is a scheduled backup, ignore manual backups + return nil + } + if len(manualBackupJobs) > 0 { + for _, job := range manualBackupJobs { + // Guard against Jobs without owner references to avoid an index-out-of-range panic. + if len(job.GetOwnerReferences()) > 0 && job.GetOwnerReferences()[0].Name == backup.GetName() { + currentBackupJob = job + break + } + } + + if manualStatus != nil && currentBackupJob != nil { + completed := jobCompleted(currentBackupJob) + failed := jobFailed(currentBackupJob) + manualStatus.CompletionTime = currentBackupJob.Status.CompletionTime + manualStatus.StartTime = currentBackupJob.Status.StartTime + manualStatus.Failed = currentBackupJob.Status.Failed + manualStatus.Succeeded = currentBackupJob.Status.Succeeded + manualStatus.Active = currentBackupJob.Status.Active + if completed { + manualStatus.BackupName = currentBackupJob.GetAnnotations()["backupName"] + manualStatus.BackupSize = currentBackupJob.GetAnnotations()["backupSize"] + manualStatus.BackupType = currentBackupJob.GetAnnotations()["backupType"] + + } + if completed || failed { + manualStatus.Finished = true + } + // Get State to the Status + switch { + case currentBackupJob.Status.Succeeded > 0: + manualStatus.State = v1beta1.BackupSucceeded + case currentBackupJob.Status.Active > 0: + manualStatus.State = v1beta1.BackupActive + case currentBackupJob.Status.Failed > 0: + manualStatus.State = v1beta1.BackupFailed + default: + manualStatus.State = v1beta1.BackupStart + } + // return manual backup status to the backup status + backup.Status.BackupName = manualStatus.BackupName + backup.Status.BackupSize = manualStatus.BackupSize + backup.Status.BackupType = manualStatus.BackupType + backup.Status.State = manualStatus.State + 
backup.Status.CompletionTime = manualStatus.CompletionTime + backup.Status.StartTime = manualStatus.StartTime + backup.Status.Type = v1beta1.ManualBackupInitiator + + } + + } + + // if there is an existing status, see if a new backup id has been provided, and if so reset + // the status and proceed with reconciling a new backup + if manualStatus == nil { + manualStatus = &v1beta1.ManualBackupStatus{ + Finished: false, + } + backup.Status.ManualBackup = manualStatus + } + + // if the status shows the Job is no longer in progress, then simply exit (which means a Job + // that has reached a "completed" or "failed" status is no longer reconciled) + if manualStatus != nil && manualStatus.Finished { + return nil + } + + backupJob := &batchv1.Job{} + backupJob.ObjectMeta = ManualBackupJobMeta(cluster) + if currentBackupJob != nil { + backupJob.ObjectMeta.Name = currentBackupJob.ObjectMeta.Name + } + labels := ManualBackupLabels(cluster.Name) + backupJob.ObjectMeta.Labels = labels + + spec, err := generateBackupJobSpec(backup, cluster, labels) + if err != nil { + return errors.WithStack(err) + } + + backupJob.Spec = *spec + + backupJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) + if err := controllerutil.SetControllerReference(backup, backupJob, + r.Client.Scheme()); err != nil { + return errors.WithStack(err) + } + + if err := r.apply(ctx, backupJob); err != nil { + return errors.WithStack(err) + } + + return nil +} + +func (r *BackupReconciler) apply(ctx context.Context, object client.Object) error { + // Generate an apply-patch by comparing the object to its zero value. + zero := reflect.New(reflect.TypeOf(object).Elem()).Interface() + data, err := client.MergeFrom(zero.(client.Object)).Data(object) + apply := client.RawPatch(client.Apply.Type(), data) + + // Keep a copy of the object before any API calls. + patch := NewJSONPatch() + + // Send the apply-patch with force=true. 
+ if err == nil { + err = r.patch(ctx, object, apply, client.ForceOwnership) + } + + // Send the json-patch when necessary. + if err == nil && !patch.IsEmpty() { + err = r.patch(ctx, object, patch) + } + return err +} + +func (r *BackupReconciler) patch( + ctx context.Context, object client.Object, + patch client.Patch, options ...client.PatchOption, +) error { + options = append([]client.PatchOption{r.Owner}, options...) + return r.Client.Patch(ctx, object, patch, options...) +} + +func (r *BackupReconciler) reconcileCronBackup(ctx context.Context, backup *v1beta1.Backup, + cronBackupJobs []*batchv1.CronJob, BackupJobs []*batchv1.Job, cluster *v1beta1.MysqlCluster) error { + log := log.FromContext(ctx).WithValues("backup", "CronJob") + + if backup.Spec.BackupSchedule == nil { + // if the backup is a manual backup, ignore scheduled backups + return nil + } + // Update backup.Status.ScheduledBackups + scheduledStatus := []v1beta1.ScheduledBackupStatus{} + for _, job := range BackupJobs { + sbs := v1beta1.ScheduledBackupStatus{} + if job.GetLabels()[LableCronJob] != "" { + if len(job.GetOwnerReferences()) > 0 { + sbs.CronJobName = job.OwnerReferences[0].Name + } + sbs.BackupName = job.GetAnnotations()["backupName"] + sbs.BackupSize = job.GetAnnotations()["backupSize"] + sbs.BackupType = job.GetAnnotations()["backupType"] + sbs.CompletionTime = job.Status.CompletionTime + sbs.Failed = job.Status.Failed + sbs.Succeeded = job.Status.Succeeded + sbs.StartTime = job.Status.StartTime + if jobCompleted(job) || jobFailed(job) { + sbs.Finished = true + } + switch { + case job.Status.Succeeded > 0: + sbs.State = v1beta1.BackupSucceeded + case job.Status.Active > 0: + sbs.State = v1beta1.BackupActive + case job.Status.Failed > 0: + sbs.State = v1beta1.BackupFailed + default: + sbs.State = v1beta1.BackupStart + } + scheduledStatus = append(scheduledStatus, sbs) + } + } + // fill the backup status, always return the latest backup job status + if len(scheduledStatus) > 0 { + 
latestScheduledStatus := scheduledStatus[len(scheduledStatus)-1] + backup.Status.StartTime = latestScheduledStatus.StartTime + backup.Status.CompletionTime = latestScheduledStatus.CompletionTime + backup.Status.BackupName = latestScheduledStatus.BackupName + backup.Status.BackupSize = latestScheduledStatus.BackupSize + backup.Status.Type = v1beta1.CronJobBackupInitiator + backup.Status.State = latestScheduledStatus.State + backup.Status.BackupType = latestScheduledStatus.BackupType + } + // fill the scheduled backup status + backup.Status.ScheduledBackups = scheduledStatus + + labels := CronBackupLabels(cluster.Name) + objectMeta := CronBackupJobMeta(cluster) + for _, cronjob := range cronBackupJobs { + if cronjob.GetDeletionTimestamp() != nil { + continue + } + if cronjob.GetLabels()[LabelCluster] == cluster.Name && + cronjob.GetLabels()[LableCronJob] == "true" { + objectMeta = metav1.ObjectMeta{ + Namespace: backup.GetNamespace(), + Name: cronjob.Name, + } + + } + + } + objectMeta.Labels = labels + // objectmeta.Annotations = annotations + jobSpec, err := generateBackupJobSpec(backup, cluster, labels) + if err != nil { + return errors.WithStack(err) + } + suspend := (cluster.Status.State != v1beta1.ClusterReadyState) || (cluster.Spec.Standby != nil) + cronJob := &batchv1.CronJob{ + ObjectMeta: objectMeta, + Spec: batchv1.CronJobSpec{ + Schedule: backup.Spec.BackupSchedule.CronExpression, + Suspend: &suspend, + ConcurrencyPolicy: batchv1.ForbidConcurrent, + JobTemplate: batchv1.JobTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: *jobSpec, + }, + }, + } + cronJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("CronJob")) + if err := controllerutil.SetControllerReference(backup, cronJob, + r.Client.Scheme()); err != nil { + return errors.WithStack(err) + } + if err := r.apply(ctx, cronJob); err != nil { + log.Error(err, "error when attempting to create Backup CronJob") + } + + return nil + +} + +func generateBackupJobSpec(backup 
*v1beta1.Backup, cluster *v1beta1.MysqlCluster, labels map[string]string) (*batchv1.JobSpec, error) { + + // If backup.Spec.BackupOpts.S3 is not nil then use ENV BACKUP_TYPE=s3 and set the s3SecretName + // If backup.Spec.BackupOpts.NFS is not nil then use ENV BACKUP_TYPE=nfs and mount the nfs volume + + backupHost := GetBackupHost(cluster) + backupImage := cluster.Spec.Backup.Image + serviceAccountName := backup.Spec.ClusterName + clusterAuthsctName := fmt.Sprintf("%s-secret", cluster.GetName()) + var S3BackuptEnv []corev1.EnvVar + var NFSBackupEnv *corev1.EnvVar + var backupTypeEnv corev1.EnvVar + var NFSVolume *corev1.Volume + var NFSVolumeMount *corev1.VolumeMount + + if backup.Spec.BackupOpts.S3 != nil && backup.Spec.BackupOpts.NFS != nil { + return nil, errors.New("backup can only be configured with one of S3 or NFS") + } + + if backup.Spec.BackupOpts.S3 != nil { + s3SecretName := backup.Spec.BackupOpts.S3.BackupSecretName + S3BackuptEnv = append(S3BackuptEnv, + getEnvVarFromSecret(s3SecretName, "S3_ENDPOINT", "s3-endpoint", false), + getEnvVarFromSecret(s3SecretName, "S3_ACCESSKEY", "s3-access-key", true), + getEnvVarFromSecret(s3SecretName, "S3_SECRETKEY", "s3-secret-key", true), + getEnvVarFromSecret(s3SecretName, "S3_BUCKET", "s3-bucket", true), + ) + backupTypeEnv = corev1.EnvVar{Name: "BACKUP_TYPE", Value: "s3"} + + } + + if backup.Spec.BackupOpts.NFS != nil { + NFSVolume = &corev1.Volume{ + Name: "nfs-backup", + VolumeSource: corev1.VolumeSource{NFS: &backup.Spec.BackupOpts.NFS.Volume}, + } + NFSVolumeMount = &corev1.VolumeMount{ + Name: "nfs-backup", + MountPath: "/backup", + } + backupTypeEnv = corev1.EnvVar{Name: "BACKUP_TYPE", Value: "nfs"} + + } + + container := corev1.Container{ + Env: []corev1.EnvVar{ + {Name: "CONTAINER_TYPE", Value: utils.ContainerBackupJobName}, + {Name: "NAMESPACE", Value: cluster.Namespace}, + {Name: "CLUSTER_NAME", Value: cluster.GetName()}, + {Name: "SERVICE_NAME", Value: fmt.Sprintf("%s-mysql", cluster.GetName())}, + 
{Name: "HOST_NAME", Value: backupHost}, + {Name: "JOB_NAME", ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.labels['job-name']", + }, + }}, + }, + Image: backupImage, + ImagePullPolicy: cluster.Spec.ImagePullPolicy, + Name: utils.ContainerBackupName, + } + container.Args = []string{ + "request_a_backup", + GetXtrabackupURL(GetBackupHost(cluster)), + } + // Add backup user and password to the env + container.Env = append(container.Env, + getEnvVarFromSecret(clusterAuthsctName, "BACKUP_USER", "backup-user", true), + getEnvVarFromSecret(clusterAuthsctName, "BACKUP_PASSWORD", "backup-password", true), + ) + // NFSBackupEnv and S3BackuptEnv are independent; the previous combined + // condition (NFSBackupEnv != nil && S3BackuptEnv != nil) was always false + // because NFSBackupEnv is never assigned, so the S3 credential env vars + // were never injected. Append each on its own. + if NFSBackupEnv != nil { + container.Env = append(container.Env, *NFSBackupEnv) + } + if S3BackuptEnv != nil { + container.Env = append(container.Env, S3BackuptEnv...) + } + + if NFSVolumeMount != nil { + container.VolumeMounts = append(container.VolumeMounts, *NFSVolumeMount) + } + + container.Env = append(container.Env, backupTypeEnv) + + jobSpec := &batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: labels}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: serviceAccountName, + }, + }, + } + if NFSVolume != nil { + jobSpec.Template.Spec.Volumes = []corev1.Volume{*NFSVolume} + } + var backoffLimit int32 = 1 + + jobSpec.Template.Spec.Tolerations = cluster.Spec.Tolerations + jobSpec.Template.Spec.Affinity = cluster.Spec.Affinity + jobSpec.BackoffLimit = &backoffLimit + return jobSpec, nil +} diff --git a/controllers/backup/kubeapi.go b/controllers/backup/kubeapi.go new file mode 100644 index 00000000..4512dcda --- /dev/null +++ b/controllers/backup/kubeapi.go @@ -0,0 +1,189 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "strings" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// escapeJSONPointer encodes '~' and '/' according to RFC 6901. +var escapeJSONPointer = strings.NewReplacer( + "~", "~0", + "/", "~1", +).Replace + +// JSON6902 represents a JSON Patch according to RFC 6902; the same as +// k8s.io/apimachinery/pkg/types.JSONPatchType. +type JSON6902 []interface{} + +// NewJSONPatch creates a new JSON Patch according to RFC 6902; the same as +// k8s.io/apimachinery/pkg/types.JSONPatchType. +func NewJSONPatch() *JSON6902 { return &JSON6902{} } + +func (*JSON6902) pointer(tokens ...string) string { + var b strings.Builder + + for _, t := range tokens { + _ = b.WriteByte('/') + _, _ = b.WriteString(escapeJSONPointer(t)) + } + + return b.String() +} + +// Add appends an "add" operation to patch. +// +// > The "add" operation performs one of the following functions, +// > depending upon what the target location references: +// > +// > o If the target location specifies an array index, a new value is +// > inserted into the array at the specified index. +// > +// > o If the target location specifies an object member that does not +// > already exist, a new member is added to the object. +// > +// > o If the target location specifies an object member that does exist, +// > that member's value is replaced. 
+func (patch *JSON6902) Add(path ...string) func(value interface{}) *JSON6902 { + i := len(*patch) + f := func(value interface{}) *JSON6902 { + (*patch)[i] = map[string]interface{}{ + "op": "add", + "path": patch.pointer(path...), + "value": value, + } + return patch + } + + *patch = append(*patch, f) + + return f +} + +// Remove appends a "remove" operation to patch. +// +// > The "remove" operation removes the value at the target location. +// > +// > The target location MUST exist for the operation to be successful. +func (patch *JSON6902) Remove(path ...string) *JSON6902 { + *patch = append(*patch, map[string]interface{}{ + "op": "remove", + "path": patch.pointer(path...), + }) + + return patch +} + +// Replace appends a "replace" operation to patch. +// +// > The "replace" operation replaces the value at the target location +// > with a new value. +// > +// > The target location MUST exist for the operation to be successful. +func (patch *JSON6902) Replace(path ...string) func(value interface{}) *JSON6902 { + i := len(*patch) + f := func(value interface{}) *JSON6902 { + (*patch)[i] = map[string]interface{}{ + "op": "replace", + "path": patch.pointer(path...), + "value": value, + } + return patch + } + + *patch = append(*patch, f) + + return f +} + +// Bytes returns the JSON representation of patch. +func (patch JSON6902) Bytes() ([]byte, error) { return patch.Data(nil) } + +// Data returns the JSON representation of patch. +func (patch JSON6902) Data(client.Object) ([]byte, error) { return json.Marshal(patch) } + +// IsEmpty returns true when patch has no operations. +func (patch JSON6902) IsEmpty() bool { return len(patch) == 0 } + +// Type returns k8s.io/apimachinery/pkg/types.JSONPatchType. +func (patch JSON6902) Type() types.PatchType { return types.JSONPatchType } + +// Merge7386 represents a JSON Merge Patch according to RFC 7386; the same as +// k8s.io/apimachinery/pkg/types.MergePatchType. 
+type Merge7386 map[string]interface{} + +// NewMergePatch creates a new JSON Merge Patch according to RFC 7386; the same +// as k8s.io/apimachinery/pkg/types.MergePatchType. +func NewMergePatch() *Merge7386 { return &Merge7386{} } + +// Add modifies patch to indicate that the member at path should be added or +// replaced with value. +// +// > If the provided merge patch contains members that do not appear +// > within the target, those members are added. If the target does +// > contain the member, the value is replaced. Null values in the merge +// > patch are given special meaning to indicate the removal of existing +// > values in the target. +func (patch *Merge7386) Add(path ...string) func(value interface{}) *Merge7386 { + position := *patch + + for len(path) > 1 { + p, ok := position[path[0]].(Merge7386) + if !ok { + p = Merge7386{} + position[path[0]] = p + } + + position = p + path = path[1:] + } + + if len(path) < 1 { + return func(interface{}) *Merge7386 { return patch } + } + + f := func(value interface{}) *Merge7386 { + position[path[0]] = value + return patch + } + + position[path[0]] = f + + return f +} + +// Remove modifies patch to indicate that the member at path should be removed +// if it exists. +func (patch *Merge7386) Remove(path ...string) *Merge7386 { + return patch.Add(path...)(nil) +} + +// Bytes returns the JSON representation of patch. +func (patch Merge7386) Bytes() ([]byte, error) { return patch.Data(nil) } + +// Data returns the JSON representation of patch. +func (patch Merge7386) Data(client.Object) ([]byte, error) { return json.Marshal(patch) } + +// IsEmpty returns true when patch has no modifications. +func (patch Merge7386) IsEmpty() bool { return len(patch) == 0 } + +// Type returns k8s.io/apimachinery/pkg/types.MergePatchType. 
+func (patch Merge7386) Type() types.PatchType { return types.MergePatchType } diff --git a/controllers/backup/utils.go b/controllers/backup/utils.go new file mode 100644 index 00000000..f543f43c --- /dev/null +++ b/controllers/backup/utils.go @@ -0,0 +1,132 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "fmt" + + "github.com/radondb/radondb-mysql-kubernetes/api/v1beta1" + "github.com/radondb/radondb-mysql-kubernetes/utils" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/rand" +) + +// Define the label of backup. +const ( + labelPrefix = "backups.mysql.radondb.com/" + LabelCluster = labelPrefix + "cluster" + LableCronJob = labelPrefix + "cronjob" + LableManualJob = labelPrefix + "manualjob" +) + +// Define the annotation of backup. +const ( + AnnotationPrefix = "backups.mysql.radondb.com/" +) + +func BackupSelector(clusterName string) labels.Selector { + return labels.SelectorFromSet(map[string]string{ + LabelCluster: clusterName, + }) +} + +// jobCompleted returns "true" if the Job provided completed successfully. Otherwise it returns +// "false". 
+func jobCompleted(job *batchv1.Job) bool { + conditions := job.Status.Conditions + for i := range conditions { + if conditions[i].Type == batchv1.JobComplete { + return (conditions[i].Status == corev1.ConditionTrue) + } + } + return false +} + +// jobFailed returns "true" if the Job provided has failed. Otherwise it returns "false". +func jobFailed(job *batchv1.Job) bool { + conditions := job.Status.Conditions + for i := range conditions { + if conditions[i].Type == batchv1.JobFailed { + return (conditions[i].Status == corev1.ConditionTrue) + } + } + return false +} + +func ManualBackupJobMeta(cluster *v1beta1.MysqlCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: cluster.GetName() + "-backup-" + rand.String(4), + Namespace: cluster.GetNamespace(), + } +} + +func CronBackupJobMeta(cluster *v1beta1.MysqlCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: cluster.GetName() + "-backup-" + rand.String(4), + Namespace: cluster.GetNamespace(), + } +} + +func ManualBackupLabels(clusterName string) labels.Set { + return map[string]string{ + LabelCluster: clusterName, + LableManualJob: "true", + } +} + +func CronBackupLabels(clusterName string) labels.Set { + return map[string]string{ + LabelCluster: clusterName, + LableCronJob: "true", + } +} + +func GetBackupHost(cluster *v1beta1.MysqlCluster) string { + var host string + nodeConditions := cluster.Status.Nodes + for _, nodeCondition := range nodeConditions { + host = nodeCondition.Name + if nodeCondition.RaftStatus.Role == "FOLLOWER" && nodeCondition.Conditions[0].Status == "False" { + host = nodeCondition.Name + } + } + return host +} + +func GetXtrabackupURL(backupHost string) string { + xtrabackupPort := utils.XBackupPort + url := fmt.Sprintf("%s:%d", backupHost, xtrabackupPort) + return url +} + +func getEnvVarFromSecret(sctName, name, key string, opt bool) corev1.EnvVar { + return corev1.EnvVar{ + Name: name, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + 
LocalObjectReference: corev1.LocalObjectReference{ + Name: sctName, + }, + Key: key, + Optional: &opt, + }, + }, + } +} diff --git a/docs/crd_mysqlcluster_v1beta1.md b/docs/crd_mysqlcluster_v1beta1.md new file mode 100644 index 00000000..e6d0f01c --- /dev/null +++ b/docs/crd_mysqlcluster_v1beta1.md @@ -0,0 +1,259 @@ + +### Custom Resources + + +### Sub Resources + +* [BackupOpts](#backupopts) +* [ClusterCondition](#clustercondition) +* [DataSource](#datasource) +* [DatabaseInitSQL](#databaseinitsql) +* [ExporterSpec](#exporterspec) +* [LogOpts](#logopts) +* [MonitoringSpec](#monitoringspec) +* [MySQLStandbySpec](#mysqlstandbyspec) +* [MysqlCluster](#mysqlcluster) +* [MysqlClusterList](#mysqlclusterlist) +* [MysqlClusterSpec](#mysqlclusterspec) +* [MysqlClusterStatus](#mysqlclusterstatus) +* [NodeCondition](#nodecondition) +* [NodeStatus](#nodestatus) +* [RaftStatus](#raftstatus) +* [RemoteDataSource](#remotedatasource) +* [ServiceSpec](#servicespec) +* [XenonOpts](#xenonopts) + +#### BackupOpts + + + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| image | Image is the image of backup container. | string | false | +| resources | Changing this value causes MySQL More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers | corev1.ResourceRequirements | false | + +[Back to Custom Resources](#custom-resources) + +#### ClusterCondition + +ClusterCondition defines type for cluster conditions. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| type | Type of cluster condition, values in (\"Initializing\", \"Ready\", \"Error\"). | ClusterConditionType | true | +| status | Status of the condition, one of (\"True\", \"False\", \"Unknown\"). | [corev1.ConditionStatus](https://pkg.go.dev/k8s.io/api/core/v1#ConditionStatus) | true | +| lastTransitionTime | The last time this Condition type changed. 
| [metav1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time) | true |
+| reason | One word, camel-case reason for current status of the condition. | string | false |
+| message | Full text reason for current status of the condition. | string | false |
+
+[Back to Custom Resources](#custom-resources)
+
+#### DataSource
+
+
+
+| Field | Description | Scheme | Required |
+| ----- | ----------- | ------ | -------- |
+| remote | Bootstrapping from remote data source | *[RemoteDataSource](#remotedatasource) | false |
+
+[Back to Custom Resources](#custom-resources)
+
+#### DatabaseInitSQL
+
+DatabaseInitSQL defines a ConfigMap containing custom SQL that will be run after the cluster is initialized. This ConfigMap must be in the same namespace as the cluster.
+
+| Field | Description | Scheme | Required |
+| ----- | ----------- | ------ | -------- |
+| name | Name is the name of a ConfigMap | string | true |
+| key | Key is the ConfigMap data key that points to a SQL string | string | true |
+
+[Back to Custom Resources](#custom-resources)
+
+#### ExporterSpec
+
+
+
+| Field | Description | Scheme | Required |
+| ----- | ----------- | ------ | -------- |
+| customTLSSecret | Projected secret containing custom TLS certificates to encrypt output from the exporter web server | *corev1.SecretProjection | false |
+| image | To specify the image that will be used for metrics container. | string | false |
+| resources | Changing this value causes MySQL and the exporter to restart. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers | corev1.ResourceRequirements | false |
+| enabled | enabled is used to enable/disable the exporter. | bool | false |
+
+[Back to Custom Resources](#custom-resources)
+
+#### LogOpts
+
+
+
+| Field | Description | Scheme | Required |
+| ----- | ----------- | ------ | -------- |
+| image | To specify the image that will be used for log container. The busybox image. 
| string | false |
+| slowLogTail | SlowLogTail represents if tail the mysql slow log. | bool | false |
+| auditLogTail | AuditLogTail represents if tail the mysql audit log. | bool | false |
+| resources | Log container resources of a MySQL container. | corev1.ResourceRequirements | false |
+
+[Back to Custom Resources](#custom-resources)
+
+#### MonitoringSpec
+
+
+
+| Field | Description | Scheme | Required |
+| ----- | ----------- | ------ | -------- |
+| exporter | | [ExporterSpec](#exporterspec) | false |
+
+[Back to Custom Resources](#custom-resources)
+
+#### MySQLStandbySpec
+
+
+
+| Field | Description | Scheme | Required |
+| ----- | ----------- | ------ | -------- |
+| enabled | Whether or not the MySQL cluster should be read-only. When this is true, the cluster will be read-only. When this is false, the cluster will run as writable. | bool | true |
+| clusterName | The name of the MySQL cluster to follow for binlog. | string | false |
+| host | Network address of the MySQL server to follow via binlog replication. | string | false |
+| port | Network port of the MySQL server to follow via binlog replication. 
| *int32 | false | + +[Back to Custom Resources](#custom-resources) + +#### MysqlCluster + +MysqlCluster is the Schema for the mysqlclusters API + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| metadata | | [metav1.ObjectMeta](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | false | +| spec | | [MysqlClusterSpec](#mysqlclusterspec) | false | +| status | | [MysqlClusterStatus](#mysqlclusterstatus) | false | + +[Back to Custom Resources](#custom-resources) + +#### MysqlClusterList + +MysqlClusterList contains a list of MysqlCluster + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| metadata | | [metav1.ListMeta](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#ListMeta) | false | +| items | | [][MysqlCluster](#mysqlcluster) | true | + +[Back to Custom Resources](#custom-resources) + +#### MysqlClusterSpec + +MysqlClusterSpec defines the desired state of MysqlCluster + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| replicas | Replicas is the number of pods. | *int32 | false | +| user | Username of new user to create. Only be a combination of letters, numbers or underlines. The length can not exceed 26 characters. | string | false | +| mysqlConfig | MySQLConfig `ConfigMap` name of MySQL config. | string | false | +| resources | Compute resources of a MySQL container. | corev1.ResourceRequirements | false | +| customTLSSecret | Containing CA (ca.crt) and server cert (tls.crt), server private key (tls.key) for SSL | corev1.SecretProjection | false | +| storage | Defines a PersistentVolumeClaim for MySQL data. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes | [corev1.PersistentVolumeClaimSpec](https://pkg.go.dev/k8s.io/api/core/v1#PersistentVolumeClaimSpec) | true | +| mysqlVersion | Represents the MySQL version that will be run. 
The available version can be found here: This field should be set even if the Image is set to let the operator know which mysql version is running. Based on this version the operator can take decisions which features can be used. | string | false |
+| databaseInitSQL | DatabaseInitSQL defines a ConfigMap containing custom SQL that will be run after the cluster is initialized. This ConfigMap must be in the same namespace as the cluster. | *[DatabaseInitSQL](#databaseinitsql) | false |
+| xenonOpts | XenonOpts is the options of xenon container. | [XenonOpts](#xenonopts) | false |
+| backupOpts | Backup is the options of backup container. | [BackupOpts](#backupopts) | false |
+| monitoringSpec | Monitoring is the options of metrics container. | [MonitoringSpec](#monitoringspec) | false |
+| image | Specifies mysql image to use. | string | false |
+| maxLagTime | MaxLagSeconds configures the readiness probe of mysqld container if the replication lag is greater than MaxLagSeconds, the mysqld container will not be healthy. | int | false |
+| imagePullPolicy | ImagePullPolicy is used to determine when Kubernetes will attempt to pull (download) container images. More info: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy | corev1.PullPolicy | false |
+| tolerations | Tolerations of a MySQL pod. Changing this value causes MySQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | []corev1.Toleration | false |
+| affinity | Scheduling constraints of MySQL pod. Changing this value causes MySQL to restart. More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node | *corev1.Affinity | false |
+| priorityClassName | Priority class name for the MySQL pods. Changing this value causes MySQL to restart. 
More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ | string | false | +| minAvailable | The number of pods from that set that must still be available after the eviction, even in the absence of the evicted pod | string | false | +| dataSource | Specifies a data source for bootstrapping the MySQL cluster. | *[DataSource](#datasource) | false | +| standby | Run this cluster as a read-only copy of an existing cluster or archive. | *[MySQLStandbySpec](#mysqlstandbyspec) | false | +| enableAutoRebuild | If true, when the data is inconsistent, Xenon will automatically rebuild the invalid node. | bool | false | +| logOpts | LogOpts is the options of log settings. | [LogOpts](#logopts) | false | +| service | Specification of the service that exposes the MySQL leader instance. | *[ServiceSpec](#servicespec) | false | + +[Back to Custom Resources](#custom-resources) + +#### MysqlClusterStatus + +MysqlClusterStatus defines the observed state of MysqlCluster + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| readyNodes | ReadyNodes represents number of the nodes that are in ready state. | int | false | +| state | State | ClusterState | false | +| conditions | Conditions contains the list of the cluster conditions fulfilled. | [][ClusterCondition](#clustercondition) | false | +| nodes | Nodes contains the list of the node status fulfilled. | [][NodeStatus](#nodestatus) | false | + +[Back to Custom Resources](#custom-resources) + +#### NodeCondition + +NodeCondition defines type for representing node conditions. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| type | Type of the node condition. | NodeConditionType | true | +| status | Status of the node, one of (\"True\", \"False\", \"Unknown\"). | [corev1.ConditionStatus](https://pkg.go.dev/k8s.io/api/core/v1#ConditionStatus) | true | +| lastTransitionTime | The last time this Condition type changed. 
| [metav1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time) | true | + +[Back to Custom Resources](#custom-resources) + +#### NodeStatus + +NodeStatus defines type for status of a node into cluster. + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| name | Name of the node. | string | true | +| message | Full text reason for current status of the node. | string | false | +| raftStatus | RaftStatus is the raft status of the node. | [RaftStatus](#raftstatus) | false | +| conditions | Conditions contains the list of the node conditions fulfilled. | [][NodeCondition](#nodecondition) | false | + +[Back to Custom Resources](#custom-resources) + +#### RaftStatus + + + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| role | Role is one of (LEADER/CANDIDATE/FOLLOWER/IDLE/INVALID) | string | false | +| leader | Leader is the name of the Leader of the current node. | string | false | +| nodes | Nodes is a list of nodes that can be identified by the current node. | []string | false | + +[Back to Custom Resources](#custom-resources) + +#### RemoteDataSource + + + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| sourceConfig | | *corev1.SecretProjection | false | + +[Back to Custom Resources](#custom-resources) + +#### ServiceSpec + + + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| nodePort | The port on which this service is exposed when type is NodePort or LoadBalancer. Value must be in-range and not in use or the operation will fail. If unspecified, a port will be allocated if this Service requires one. 
- https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport | *int32 | false | +| type | More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types | string | true | + +[Back to Custom Resources](#custom-resources) + +#### XenonOpts + + + +| Field | Description | Scheme | Required | +| ----- | ----------- | ------ | -------- | +| image | To specify the image that will be used for xenon container. | string | false | +| admitDefeatHearbeatCount | High available component admit defeat heartbeat count. | *int32 | false | +| electionTimeout | High available component election timeout. The unit is millisecond. | *int32 | false | +| enableAutoRebuild | If true, when the data is inconsistent, Xenon will automatically rebuild the invalid node. | bool | false | +| resources | The compute resource requirements. | corev1.ResourceRequirements | false | + +[Back to Custom Resources](#custom-resources) diff --git a/docs/links.csv b/docs/links.csv new file mode 100644 index 00000000..da8c2c6f --- /dev/null +++ b/docs/links.csv @@ -0,0 +1,21 @@ +batchv1.ConcurrencyPolicy,https://pkg.go.dev/k8s.io/api/batch/v1#ConcurrencyPolicy +corev1.ConditionStatus,https://pkg.go.dev/k8s.io/api/core/v1#ConditionStatus +corev1.EnvFromSource,https://pkg.go.dev/k8s.io/api/core/v1#EnvFromSource +corev1.EnvVar,https://pkg.go.dev/k8s.io/api/core/v1#EnvVar +corev1.PersistentVolumeClaimSpec,https://pkg.go.dev/k8s.io/api/core/v1#PersistentVolumeClaimSpec +corev1.PodSpec,https://pkg.go.dev/k8s.io/api/core/v1#PodSpec +corev1.ServiceSpec,https://pkg.go.dev/k8s.io/api/core/v1#ServiceSpec +corev1.VolumeSource,https://pkg.go.dev/k8s.io/api/core/v1#VolumeSource +metav1.Duration,https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration +metav1.ListMeta,https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#ListMeta +metav1.ObjectMeta,https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta 
+metav1.Time,https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time +resource.Quantity,https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity +PersistentVolumeClaimSpecApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#PersistentVolumeClaimSpecApplyConfiguration +PodSpecApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#PodSpecApplyConfiguration +ServiceSpecApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#ServiceSpecApplyConfiguration +VolumeSourceApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#VolumeSourceApplyConfiguration +EnvFromSourceApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#EnvFromSourceApplyConfiguration +EnvVarApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#EnvVarApplyConfiguration +AffinityApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#AffinityApplyConfiguration +ResourceRequirementsApplyConfiguration,https://pkg.go.dev/k8s.io/client-go/applyconfigurations/core/v1#ResourceRequirementsApplyConfiguration \ No newline at end of file diff --git a/go.mod b/go.mod index fc7102b9..86e0eabe 100644 --- a/go.mod +++ b/go.mod @@ -70,7 +70,7 @@ require ( github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/errors v0.9.1 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/otp v1.2.0 // indirect github.com/prometheus/client_golang v1.11.1 // indirect diff --git a/mysqlcluster/container/init_sidecar.go b/mysqlcluster/container/init_sidecar.go index c590d5e3..426325d2 100644 --- a/mysqlcluster/container/init_sidecar.go +++ 
b/mysqlcluster/container/init_sidecar.go @@ -199,6 +199,10 @@ func (c *initSidecar) getVolumeMounts() []corev1.VolumeMount { Name: utils.SysLocalTimeZone, MountPath: utils.SysLocalTimeZoneMountPath, }, + { + Name: utils.MySQLcheckerVolumeName, + MountPath: utils.RadonDBBinDir, + }, } if c.Spec.TlsSecretName != "" { volumeMounts = append(volumeMounts, diff --git a/mysqlcluster/container/init_sidecar_test.go b/mysqlcluster/container/init_sidecar_test.go index 241ffbb2..48cd922b 100644 --- a/mysqlcluster/container/init_sidecar_test.go +++ b/mysqlcluster/container/init_sidecar_test.go @@ -302,6 +302,10 @@ var ( Name: utils.SysLocalTimeZone, MountPath: utils.SysLocalTimeZoneMountPath, }, + { + Name: utils.MySQLcheckerVolumeName, + MountPath: utils.RadonDBBinDir, + }, } initSidecarCase = EnsureContainer("init-sidecar", &testInitSidecarCluster) ) @@ -445,7 +449,7 @@ func TestGetInitSidecarVolumeMounts(t *testing.T) { MysqlCluster: &testToKuDBMysqlCluster, } tokudbCase := EnsureContainer("init-sidecar", &testTokuDBCluster) - tokuDBVolumeMounts := make([]corev1.VolumeMount, 8, 9) + tokuDBVolumeMounts := make([]corev1.VolumeMount, 9) copy(tokuDBVolumeMounts, defaultInitsidecarVolumeMounts) tokuDBVolumeMounts = append(tokuDBVolumeMounts, corev1.VolumeMount{ Name: utils.SysVolumeName, @@ -461,7 +465,7 @@ func TestGetInitSidecarVolumeMounts(t *testing.T) { MysqlCluster: &testPersistenceMysqlCluster, } persistenceCase := EnsureContainer("init-sidecar", &testPersistenceCluster) - persistenceVolumeMounts := make([]corev1.VolumeMount, 8, 9) + persistenceVolumeMounts := make([]corev1.VolumeMount, 9) copy(persistenceVolumeMounts, defaultInitsidecarVolumeMounts) persistenceVolumeMounts = append(persistenceVolumeMounts, corev1.VolumeMount{ Name: utils.DataVolumeName, diff --git a/mysqlcluster/container/mysql.go b/mysqlcluster/container/mysql.go index 54add740..a3aa97f6 100644 --- a/mysqlcluster/container/mysql.go +++ b/mysqlcluster/container/mysql.go @@ -122,7 +122,7 @@ func (c *mysql) 
getLivenessProbe() *corev1.Probe { Command: []string{ "/usr/bin/bash", "-c", - "mysqlchecker liveness", + "/opt/radondb/mysqlchecker liveness", }, }, }, @@ -142,7 +142,7 @@ func (c *mysql) getReadinessProbe() *corev1.Probe { Command: []string{ "/usr/bin/bash", "-c", - "mysqlchecker readiness", + "/opt/radondb/mysqlchecker readiness", }, }, }, @@ -173,6 +173,10 @@ func (c *mysql) getVolumeMounts() []corev1.VolumeMount { Name: utils.SysLocalTimeZone, MountPath: utils.SysLocalTimeZoneMountPath, }, + { + Name: utils.MySQLcheckerVolumeName, + MountPath: utils.RadonDBBinDir, + }, } if c.Spec.TlsSecretName != "" { volumeMounts = append(volumeMounts, diff --git a/mysqlcluster/container/mysql_test.go b/mysqlcluster/container/mysql_test.go index 8ec17e51..ec7c9b1e 100644 --- a/mysqlcluster/container/mysql_test.go +++ b/mysqlcluster/container/mysql_test.go @@ -132,7 +132,7 @@ func TestGetMysqlLivenessProbe(t *testing.T) { livenessProbe := &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ - Command: []string{"/usr/bin/bash", "-c", "mysqlchecker liveness"}, + Command: []string{"/usr/bin/bash", "-c", "/opt/radondb/mysqlchecker liveness"}, }, }, InitialDelaySeconds: 30, @@ -148,7 +148,7 @@ func TestGetMysqlReadinessProbe(t *testing.T) { readinessProbe := &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ Exec: &corev1.ExecAction{ - Command: []string{"/usr/bin/bash", "-c", "mysqlchecker readiness"}, + Command: []string{"/usr/bin/bash", "-c", "/opt/radondb/mysqlchecker readiness"}, }, }, InitialDelaySeconds: 10, @@ -178,6 +178,10 @@ func TestGetMysqlVolumeMounts(t *testing.T) { Name: utils.SysLocalTimeZone, MountPath: "/etc/localtime", }, + { + Name: utils.MySQLcheckerVolumeName, + MountPath: "/opt/radondb", + }, } assert.Equal(t, volumeMounts, mysqlCase.VolumeMounts) } diff --git a/mysqlcluster/mysqlcluster.go b/mysqlcluster/mysqlcluster.go index 88f00b30..8e38905f 100644 --- a/mysqlcluster/mysqlcluster.go +++ b/mysqlcluster/mysqlcluster.go @@ -276,6 
+276,14 @@ func (c *MysqlCluster) EnsureVolumes() []corev1.Volume { }, }) } + + // Add mysql checker volume + volumes = append(volumes, corev1.Volume{ + Name: utils.MySQLcheckerVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }) return volumes } diff --git a/mysqlcluster/mysqlcluster_test.go b/mysqlcluster/mysqlcluster_test.go index 8efe01c2..64cfa423 100644 --- a/mysqlcluster/mysqlcluster_test.go +++ b/mysqlcluster/mysqlcluster_test.go @@ -318,6 +318,12 @@ func TestEnsureVolumes(t *testing.T) { }, }, }, + { + Name: utils.MySQLcheckerVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, } // when disable Persistence { diff --git a/sidecar/backup.go b/sidecar/backup.go new file mode 100644 index 00000000..a1a5d5f7 --- /dev/null +++ b/sidecar/backup.go @@ -0,0 +1,226 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sidecar + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/radondb/radondb-mysql-kubernetes/utils" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type BackupClientConfig struct { + BackupName string `json:"backup_name"` + NameSpace string `json:"namespace"` + ServiceName string `json:"service_name"` + BackupUser string `json:"backup_user"` + BackupPassword string `json:"backup_password"` + JobName string `json:"job_name"` + ClusterName string `json:"cluster_name"` + RootPassword string `json:"root_password"` + XCloudS3EndPoint string `json:"xcloud_s3_endpoint"` + XCloudS3AccessKey string `json:"xcloud_s3_access_key"` + XCloudS3SecretKey string `json:"xcloud_s3_secret_key"` + XCloudS3Bucket string `json:"xcloud_s3_bucket"` + // NFS server which Restore from + XRestoreFromNFS string `json:"xrestore_from_nfs"` + // XtrabackupExtraArgs is a list of extra command line arguments to pass to xtrabackup. + XtrabackupExtraArgs []string `json:"xtrabackup_extra_args"` + // XtrabackupTargetDir is a backup destination directory for xtrabackup. + XtrabackupTargetDir string `json:"xtrabackup_target_dir"` + // BackupType is a backup type for xtrabackup. s3 or disk + BackupType BkType `json:"backup_type"` +} + +type BkType string + +const ( + // BackupTypeS3 is a backup type for xtrabackup. s3 + S3 BkType = "s3" + // BackupTypeDisk is a backup type for xtrabackup. disk + NFS BkType = "nfs" +) + +// NewReqBackupConfig returns the configuration file needed for backup job call /backup. +// The configuration file is obtained from the environment variables. 
+func NewReqBackupConfig() *BackupClientConfig { + BackupName, _ := utils.BuildBackupName(getEnvValue("CLUSTER_NAME")) + return &BackupClientConfig{ + NameSpace: getEnvValue("NAMESPACE"), + ServiceName: getEnvValue("SERVICE_NAME"), + BackupUser: getEnvValue("BACKUP_USER"), + BackupPassword: getEnvValue("BACKUP_PASSWORD"), + JobName: getEnvValue("JOB_NAME"), + ClusterName: getEnvValue("CLUSTER_NAME"), + RootPassword: getEnvValue("MYSQL_ROOT_PASSWORD"), + XCloudS3EndPoint: getEnvValue("S3_ENDPOINT"), + XCloudS3AccessKey: getEnvValue("S3_ACCESSKEY"), + XCloudS3SecretKey: getEnvValue("S3_SECRETKEY"), + XCloudS3Bucket: getEnvValue("S3_BUCKET"), + BackupName: BackupName, + BackupType: BkType(getEnvValue("BACKUP_TYPE")), + } +} + +// Build xbcloud arguments +func (cfg *BackupClientConfig) XCloudArgs(backupName string) []string { + xcloudArgs := []string{ + "put", + "--storage=S3", + fmt.Sprintf("--s3-endpoint=%s", cfg.XCloudS3EndPoint), + fmt.Sprintf("--s3-access-key=%s", cfg.XCloudS3AccessKey), + fmt.Sprintf("--s3-secret-key=%s", cfg.XCloudS3SecretKey), + fmt.Sprintf("--s3-bucket=%s", cfg.XCloudS3Bucket), + "--parallel=10", + // utils.BuildBackupName(cfg.ClusterName), + cfg.BackupName, + "--insecure", + } + return xcloudArgs +} + +func (cfg *BackupClientConfig) XtrabackupArgs() []string { + // xtrabackup --backup --target-dir= + tmpdir := "/root/backup/" + if len(cfg.XtrabackupTargetDir) != 0 { + tmpdir = cfg.XtrabackupTargetDir + } + xtrabackupArgs := []string{ + "--backup", + "--stream=xbstream", + "--host=127.0.0.1", + fmt.Sprintf("--user=%s", utils.RootUser), + fmt.Sprintf("--password=%s", cfg.RootPassword), + fmt.Sprintf("--target-dir=%s", tmpdir), + } + + return append(xtrabackupArgs, cfg.XtrabackupExtraArgs...) 
+} + +func (cfg *BackupClientConfig) XBackupName() (string, string) { + return utils.BuildBackupName(cfg.ClusterName) +} + +func setAnnonations(cfg *BackupClientConfig, backname string, DateTime string, BackupType string, BackupSize int64) error { + config, err := rest.InClusterConfig() + if err != nil { + return err + } + // creates the clientset + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return err + } + + job, err := clientset.BatchV1().Jobs(cfg.NameSpace).Get(context.TODO(), cfg.JobName, metav1.GetOptions{}) + if err != nil { + return err + } + if job.Annotations == nil { + job.Annotations = make(map[string]string) + } + job.Annotations[utils.JobAnonationName] = backname + job.Annotations[utils.JobAnonationDate] = DateTime + job.Annotations[utils.JobAnonationType] = BackupType + job.Annotations[utils.JobAnonationSize] = strconv.FormatInt(BackupSize, 10) + _, err = clientset.BatchV1().Jobs(cfg.NameSpace).Update(context.TODO(), job, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil +} + +func RunTakeS3BackupCommand(cfg *BackupClientConfig) (string, string, int64, error) { + // cfg->XtrabackupArgs() + xtrabackup := exec.Command(xtrabackupCommand, cfg.XtrabackupArgs()...) + + var err error + backupName, DateTime := cfg.XBackupName() + xcloud := exec.Command(xcloudCommand, cfg.XCloudArgs(backupName)...) 
+ log.Info("xargs ", "xargs", strings.Join(cfg.XCloudArgs(backupName), " ")) + + // Create a pipe between xtrabackup and xcloud + r, w := io.Pipe() + defer r.Close() + xcloud.Stdin = r + + // Start xtrabackup command with stdout directed to the pipe + xtrabackupReader, err := xtrabackup.StdoutPipe() + if err != nil { + log.Error(err, "failed to create stdout pipe for xtrabackup") + return "", "", 0, err + } + + // set xtrabackup and xcloud stderr to os.Stderr + xtrabackup.Stderr = os.Stderr + xcloud.Stderr = os.Stderr + + // Start xtrabackup and xcloud in separate goroutines + if err := xtrabackup.Start(); err != nil { + log.Error(err, "failed to start xtrabackup command") + return "", "", 0, err + } + if err := xcloud.Start(); err != nil { + log.Error(err, "fail start xcloud ") + return "", "", 0, err + } + + // Use io.Copy to write xtrabackup output to the pipe while tracking the number of bytes written + var n int64 + go func() { + n, err = io.Copy(w, xtrabackupReader) + if err != nil { + log.Error(err, "failed to write xtrabackup output to pipe") + } + w.Close() + }() + + // Wait for xtrabackup and xcloud to finish + // pipe command fail one, whole things fail + errorChannel := make(chan error, 2) + go func() { + errorChannel <- xcloud.Wait() + }() + go func() { + errorChannel <- xtrabackup.Wait() + }() + + for i := 0; i < 2; i++ { + if err = <-errorChannel; err != nil { + // If xtrabackup or xcloud failed, stop the pipe and kill the other command + log.Error(err, "xtrabackup or xcloud failed closing the pipe...") + xtrabackup.Process.Kill() + xcloud.Process.Kill() + return "", "", 0, err + } + } + + // Log backup size and upload speed + backupSizeMB := float64(n) / (1024 * 1024) + log.Info(fmt.Sprintf("Backup size: %.2f MB", backupSizeMB)) + + return backupName, DateTime, n, nil +} diff --git a/sidecar/client_backup.go b/sidecar/client_backup.go new file mode 100644 index 00000000..7baa9fa7 --- /dev/null +++ b/sidecar/client_backup.go @@ -0,0 +1,147 @@ +/* 
+Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sidecar + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "os/exec" + + "github.com/radondb/radondb-mysql-kubernetes/utils" +) + +func requestS3Backup(cfg *BackupClientConfig, host string, endpoint string) (*http.Response, error) { + + log.Info("initialize a backup", "host", host, "endpoint", endpoint) + reqBody, err := json.Marshal(cfg) + if err != nil { + log.Error(err, "fail to marshal request body") + return nil, fmt.Errorf("fail to marshal request body: %s", err) + } + + req, err := http.NewRequest("POST", prepareURL(host, endpoint), bytes.NewBuffer(reqBody)) + if err != nil { + return nil, fmt.Errorf("fail to create request: %s", err) + } + + // set authentication user and password + req.SetBasicAuth(cfg.BackupUser, cfg.BackupPassword) + + client := &http.Client{} + client.Transport = transportWithTimeout(serverConnectTimeout) + + resp, err := client.Do(req) + if err != nil || resp.StatusCode != 200 { + status := "unknown" + if resp != nil { + status = resp.Status + } + return nil, fmt.Errorf("fail to get backup: %s, code: %s", err, status) + } + defer resp.Body.Close() + var result utils.JsonResult + json.NewDecoder(resp.Body).Decode(&result) + + err = setAnnonations(cfg, result.BackupName, result.Date, "S3", result.BackupSize) // set annotation + if err != nil { + return nil, fmt.Errorf("fail to set annotation: %s", err) + } + return resp, nil +} + +func 
requestNFSBackup(cfg *BackupClientConfig, host string, endpoint string) error { + log.Info("initializing a NFS backup", "host", host, "endpoint", endpoint) + + backupName, DateTime := cfg.XBackupName() + + reqBody, err := json.Marshal(cfg) + if err != nil { + return fmt.Errorf("failed to marshal request body: %w", err) + } + + req, err := http.NewRequest("GET", prepareURL(host, endpoint), bytes.NewBuffer(reqBody)) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) + } + req.SetBasicAuth(cfg.BackupUser, cfg.BackupPassword) + + client := &http.Client{ + Transport: transportWithTimeout(serverConnectTimeout), + } + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("failed to get backup: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("failed to get backup: HTTP status %s", resp.Status) + } + defer resp.Body.Close() + + // Create the backup dir + // Backupdir is the name of the backup + backupPath := fmt.Sprintf("%s/%s", "/backup", backupName) + if err := os.MkdirAll(backupPath, 0755); err != nil { + return fmt.Errorf("failed to create backup dir: %w", err) + } + + // Create a pipe for the xbstream command to read from. + rc, wc := io.Pipe() + cmd := exec.Command("xbstream", "-x", "-C", backupPath) + cmd.Stdin = rc + cmd.Stderr = os.Stderr + + // Start xbstream command. + if err := cmd.Start(); err != nil { + return fmt.Errorf("failed to start xbstream cmd: %w", err) + } + + // Write the response body to the pipe. + copyErr := make(chan error) + var n int64 + go func() { + n, err = io.Copy(wc, resp.Body) + wc.Close() + copyErr <- err + }() + + // Wait for the xbstream command to finish. 
+ cmdErr := make(chan error) + go func() { + cmdErr <- cmd.Wait() + }() + + if err := <-copyErr; err != nil { + return fmt.Errorf("failed to write to pipe: %w", err) + } + + if err := <-cmdErr; err != nil { + return fmt.Errorf("xbstream command failed: %w", err) + } + + if err := setAnnonations(cfg, backupName, DateTime, "nfs", n); err != nil { + return fmt.Errorf("failed to set annotation: %w", err) + } + log.Info("backup completed", "backupName", backupName, "backupSize", n) + + return nil +} diff --git a/sidecar/config.go b/sidecar/config.go index 27d0c121..239dd97a 100644 --- a/sidecar/config.go +++ b/sidecar/config.go @@ -106,32 +106,24 @@ type Config struct { // XtrabackupTargetDir is a backup destination directory for xtrabackup. XtrabackupTargetDir string - // S3 endpoint domain name - XCloudS3EndPoint string - - // S3 access key - XCloudS3AccessKey string - - // S3 secrete key - XCloudS3SecretKey string - - // S3 Bucket names - XCloudS3Bucket string - - // directory in S3 bucket for cluster restore from - XRestoreFrom string - // Clone flag CloneFlag bool // GtidPurged is the gtid set of the slave cluster to purged. GtidPurged string - // NFS server which Restore from - XRestoreFromNFS string + // XRestoreFromNFS string // User customized initsql. InitSQL string + + // directory in S3 bucket for cluster restore from + XRestoreFrom string + XRestoreFromNFS string + XCloudS3EndPoint string + XCloudS3AccessKey string + XCloudS3SecretKey string + XCloudS3Bucket string } // NewInitConfig returns a pointer to Config. 
@@ -221,24 +213,24 @@ func NewBackupConfig() *Config { BackupUser: getEnvValue("BACKUP_USER"), BackupPassword: getEnvValue("BACKUP_PASSWORD"), - XCloudS3EndPoint: getEnvValue("S3_ENDPOINT"), - XCloudS3AccessKey: getEnvValue("S3_ACCESSKEY"), - XCloudS3SecretKey: getEnvValue("S3_SECRETKEY"), - XCloudS3Bucket: getEnvValue("S3_BUCKET"), + // XCloudS3EndPoint: getEnvValue("S3_ENDPOINT"), + // XCloudS3AccessKey: getEnvValue("S3_ACCESSKEY"), + // XCloudS3SecretKey: getEnvValue("S3_SECRETKEY"), + // XCloudS3Bucket: getEnvValue("S3_BUCKET"), } } // NewReqBackupConfig returns the configuration file needed for backup job. -func NewReqBackupConfig() *Config { - return &Config{ - NameSpace: getEnvValue("NAMESPACE"), - ServiceName: getEnvValue("SERVICE_NAME"), - - BackupUser: getEnvValue("BACKUP_USER"), - BackupPassword: getEnvValue("BACKUP_PASSWORD"), - JobName: getEnvValue("JOB_NAME"), - } -} +// func NewReqBackupConfig() *Config { +// return &Config{ +// NameSpace: getEnvValue("NAMESPACE"), +// ServiceName: getEnvValue("SERVICE_NAME"), + +// BackupUser: getEnvValue("BACKUP_USER"), +// BackupPassword: getEnvValue("BACKUP_PASSWORD"), +// JobName: getEnvValue("JOB_NAME"), +// } +// } // GetContainerType returns the CONTAINER_TYPE of the currently running container. // CONTAINER_TYPE used to mark the container type. @@ -479,12 +471,16 @@ func (cfg *Config) buildClientConfig() (*ini.File, error) { // return utils.StringToBytes(str) // } -/* The function is equivalent to the following shell script template: +/* + The function is equivalent to the following shell script template: + #!/bin/sh if [ ! -d {{.DataDir}} ] ; then - echo "is not exist the var lib mysql" - mkdir {{.DataDir}} - chown -R mysql.mysql {{.DataDir}} + + echo "is not exist the var lib mysql" + mkdir {{.DataDir}} + chown -R mysql.mysql {{.DataDir}} + fi mkdir /root/backup xbcloud get --storage=S3 \ @@ -658,19 +654,20 @@ func GetXtrabackupGTIDPurged(backuppath string) (string, error) { /* `#!/bin/sh - if [ ! 
-d {{.DataDir}} ]; then - echo "is not exist the var lib mysql" - mkdir {{.DataDir}} - chown -R mysql.mysql {{.DataDir}} - fi - rm -rf {{.DataDir}}/* - xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --apply-log-only --target-dir=/backup/{{.XRestoreFrom}} - xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --target-dir=/backup/{{.XRestoreFrom}} - chown -R mysql.mysql /backup/{{.XRestoreFromNFS}} - xtrabackup --defaults-file={{.MyCnfMountPath}} --datadir={{.DataDir}} --copy-back --target-dir=/backup/{{.XRestoreFrom}} - exit_code=$? - chown -R mysql.mysql {{.DataDir}} - exit $exit_code + + if [ ! -d {{.DataDir}} ]; then + echo "is not exist the var lib mysql" + mkdir {{.DataDir}} + chown -R mysql.mysql {{.DataDir}} + fi + rm -rf {{.DataDir}}/* + xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --apply-log-only --target-dir=/backup/{{.XRestoreFrom}} + xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --target-dir=/backup/{{.XRestoreFrom}} + chown -R mysql.mysql /backup/{{.XRestoreFromNFS}} + xtrabackup --defaults-file={{.MyCnfMountPath}} --datadir={{.DataDir}} --copy-back --target-dir=/backup/{{.XRestoreFrom}} + exit_code=$? + chown -R mysql.mysql {{.DataDir}} + exit $exit_code */ func (cfg *Config) ExecuteNFSRestore() error { if len(cfg.XRestoreFromNFS) == 0 { diff --git a/sidecar/init.go b/sidecar/init.go index 4ae83767..1d7fdadd 100644 --- a/sidecar/init.go +++ b/sidecar/init.go @@ -167,6 +167,12 @@ func runInitCommand(cfg *Config, hasInitialized bool) error { if exists, _ := checkIfPathExists(utils.TlsMountPath); exists { buildSSLdata() } + // copy mysqlchecker to /opt/radondb/ + if exists, _ := checkIfPathExists(utils.RadonDBBinDir); exists { + log.Info("copy mysqlchecker to /opt/radondb/") + copyMySQLchecker() + } + buildDefaultXenonMeta(uid, gid) // build client.conf. 
@@ -276,9 +282,16 @@ func RunHttpServer(cfg *Config, stop <-chan struct{}) error { } // request a backup command. -func RunRequestBackup(cfg *Config, host string) error { - _, err := requestABackup(cfg, host, serverBackupEndpoint) - return err +func RunRequestBackup(cfg *BackupClientConfig, host string) error { + if cfg.BackupType == S3 { + _, err := requestS3Backup(cfg, host, serverBackupEndpoint) + return err + } + if cfg.BackupType == NFS { + err := requestNFSBackup(cfg, host, serverBackupDownLoadEndpoint) + return err + } + return fmt.Errorf("unknown backup type: %s", cfg.BackupType) } // Save plugin.cnf and extra.cnf to specified path. @@ -342,6 +355,20 @@ func buildSSLdata() error { return nil } +func copyMySQLchecker() error { + cpCmd := "cp /mnt/mysqlchecker " + utils.RadonDBBinDir + cmd := exec.Command("sh", "-c", cpCmd) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to copy mysqlchecker: %s", err) + } + chownCmd := "chown -R mysql.mysql " + utils.RadonDBBinDir + cmd = exec.Command("sh", "-c", chownCmd) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to chown mysqlchecker: %s", err) + } + return nil +} + func getPod(cfg *Config) (string, error) { log.Info("Now check the pod which has got rebuild-from") config, err := rest.InClusterConfig() diff --git a/sidecar/server.go b/sidecar/server.go index 8eccc654..dbefb531 100644 --- a/sidecar/server.go +++ b/sidecar/server.go @@ -28,10 +28,6 @@ import ( "strings" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "github.com/radondb/radondb-mysql-kubernetes/utils" ) @@ -71,11 +67,13 @@ func newServer(cfg *Config, stop <-chan struct{}) *server { } // Add handle functions. + // Health check mux.HandleFunc(serverProbeEndpoint, srv.healthHandler) + // Backup server mux.Handle(serverBackupEndpoint, maxClients(http.HandlerFunc(srv.backupHandler), 1)) - + // Backup download server. 
mux.Handle(serverBackupDownLoadEndpoint, - maxClients(http.HandlerFunc(srv.backupDownLoadHandler), 1)) + maxClients(http.HandlerFunc(srv.backupDownloadHandler), 1)) // Shutdown gracefully the http server. go func() { @@ -99,21 +97,34 @@ func (s *server) healthHandler(w http.ResponseWriter, r *http.Request) { func (s *server) backupHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Connection", "keep-alive") w.Header().Set("content-type", "text/json") + + // Extract backup name from POST body + + var requestBody BackupClientConfig + err := json.NewDecoder(r.Body).Decode(&requestBody) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !s.isAuthenticated(r) { http.Error(w, "Not authenticated!", http.StatusForbidden) return } - backName, Datetime, err := RunTakeBackupCommand(s.cfg) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } else { - msg, _ := json.Marshal(utils.JsonResult{Status: backupSuccessful, BackupName: backName, Date: Datetime}) - w.Write(msg) + // /backup only handle S3 backup + if requestBody.BackupType == S3 { + + backName, Datetime, backupSize, err := RunTakeS3BackupCommand(&requestBody) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } else { + msg, _ := json.Marshal(utils.JsonResult{Status: backupSuccessful, BackupName: backName, Date: Datetime, BackupSize: backupSize}) + w.Write(msg) + } } } -// DownLoad handler. 
-func (s *server) backupDownLoadHandler(w http.ResponseWriter, r *http.Request) { +func (s *server) backupDownloadHandler(w http.ResponseWriter, r *http.Request) { if !s.isAuthenticated(r) { http.Error(w, "Not authenticated!", http.StatusForbidden) @@ -210,65 +221,3 @@ func transportWithTimeout(connectTimeout time.Duration) http.RoundTripper { ExpectContinueTimeout: 1 * time.Second, } } - -func setAnnonations(cfg *Config, backname string, DateTime string, BackupType string) error { - config, err := rest.InClusterConfig() - if err != nil { - return err - } - // creates the clientset - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return err - } - - job, err := clientset.BatchV1().Jobs(cfg.NameSpace).Get(context.TODO(), cfg.JobName, metav1.GetOptions{}) - if err != nil { - return err - } - if job.Annotations == nil { - job.Annotations = make(map[string]string) - } - job.Annotations[utils.JobAnonationName] = backname - job.Annotations[utils.JobAnonationDate] = DateTime - job.Annotations[utils.JobAnonationType] = BackupType - _, err = clientset.BatchV1().Jobs(cfg.NameSpace).Update(context.TODO(), job, metav1.UpdateOptions{}) - if err != nil { - return err - } - return nil -} - -// requestABackup connects to specified host and endpoint and gets the backup. 
-func requestABackup(cfg *Config, host string, endpoint string) (*http.Response, error) { - log.Info("initialize a backup", "host", host, "endpoint", endpoint) - - req, err := http.NewRequest("GET", prepareURL(host, endpoint), nil) - if err != nil { - return nil, fmt.Errorf("fail to create request: %s", err) - } - - // set authentication user and password - req.SetBasicAuth(cfg.BackupUser, cfg.BackupPassword) - - client := &http.Client{} - client.Transport = transportWithTimeout(serverConnectTimeout) - - resp, err := client.Do(req) - if err != nil || resp.StatusCode != 200 { - status := "unknown" - if resp != nil { - status = resp.Status - } - return nil, fmt.Errorf("fail to get backup: %s, code: %s", err, status) - } - defer resp.Body.Close() - var result utils.JsonResult - json.NewDecoder(resp.Body).Decode(&result) - - err = setAnnonations(cfg, result.BackupName, result.Date, "S3") // set annotation - if err != nil { - return nil, fmt.Errorf("fail to set annotation: %s", err) - } - return resp, nil -} diff --git a/sidecar/takebackup.go b/sidecar/takebackup.go deleted file mode 100644 index c06a3ee5..00000000 --- a/sidecar/takebackup.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2021 RadonDB. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sidecar - -import ( - "os" - "os/exec" - "strings" -) - -// RunTakeBackupCommand starts a backup command -func RunTakeBackupCommand(cfg *Config) (string, string, error) { - // cfg->XtrabackupArgs() - xtrabackup := exec.Command(xtrabackupCommand, cfg.XtrabackupArgs()...) - - var err error - backupName, DateTime := cfg.XBackupName() - xcloud := exec.Command(xcloudCommand, cfg.XCloudArgs(backupName)...) - log.Info("xargs ", "xargs", strings.Join(cfg.XCloudArgs(backupName), " ")) - if xcloud.Stdin, err = xtrabackup.StdoutPipe(); err != nil { - log.Error(err, "failed to pipline") - return "", "", err - } - xtrabackup.Stderr = os.Stderr - xcloud.Stderr = os.Stderr - - if err := xtrabackup.Start(); err != nil { - log.Error(err, "failed to start xtrabackup command") - return "", "", err - } - if err := xcloud.Start(); err != nil { - log.Error(err, "fail start xcloud ") - return "", "", err - } - - // pipe command fail one, whole things fail - errorChannel := make(chan error, 2) - go func() { - errorChannel <- xcloud.Wait() - }() - go func() { - errorChannel <- xtrabackup.Wait() - }() - - for i := 0; i < 2; i++ { - if err = <-errorChannel; err != nil { - return "", "", err - } - } - return backupName, DateTime, nil -} diff --git a/utils/constants.go b/utils/constants.go index 8cab16bf..75f26880 100644 --- a/utils/constants.go +++ b/utils/constants.go @@ -81,16 +81,17 @@ const ( BackupUser = "sys_backup" // volumes names. 
- MysqlConfVolumeName = "mysql-conf" - MysqlCMVolumeName = "mysql-cm" - XenonMetaVolumeName = "xenon-meta" - XenonCMVolumeName = "xenon-cm" - LogsVolumeName = "logs" - DataVolumeName = "data" - SysVolumeName = "host-sys" - ScriptsVolumeName = "scripts" - XenonConfVolumeName = "xenon-conf" - InitFileVolumeName = "init-mysql" + MysqlConfVolumeName = "mysql-conf" + MysqlCMVolumeName = "mysql-cm" + XenonMetaVolumeName = "xenon-meta" + XenonCMVolumeName = "xenon-cm" + LogsVolumeName = "logs" + DataVolumeName = "data" + SysVolumeName = "host-sys" + ScriptsVolumeName = "scripts" + XenonConfVolumeName = "xenon-conf" + InitFileVolumeName = "init-mysql" + MySQLcheckerVolumeName = "mysql-checker" // volumes mount path. MysqlConfVolumeMountPath = "/etc/mysql" @@ -129,6 +130,9 @@ const ( TlsVolumeName = "tls" // TlsMountPath is the volume mount path for tls TlsMountPath = "/etc/mysql-ssl" + + // RadonDB excutable files dir + RadonDBBinDir = "/opt/radondb" ) // ResourceName is the type for aliasing resources that will be created. @@ -165,6 +169,8 @@ const ( JobAnonationDate = "backupDate" // Job Annonations type JobAnonationType = "backupType" + // Job Annonations size + JobAnonationSize = "backupSize" ) // JobType @@ -198,6 +204,7 @@ type JsonResult struct { Status string `json:"status"` BackupName string `json:"backupName"` Date string `json:"date"` + BackupSize int64 `json:"backupSize"` } // MySQLDefaultVersionMap is a map of supported mysql version and their image diff --git a/utils/incluster.go b/utils/incluster.go index e3d39f06..113e3ae0 100644 --- a/utils/incluster.go +++ b/utils/incluster.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package utils import ( diff --git a/utils/incluster_test.go b/utils/incluster_test.go index abeb8703..4bb9c979 100644 --- a/utils/incluster_test.go +++ b/utils/incluster_test.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 RadonDB. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package utils import (