diff --git a/api/v1alpha1/backup_types.go b/api/v1alpha1/backup_types.go
index 0c8502edb..bf33b44c3 100644
--- a/api/v1alpha1/backup_types.go
+++ b/api/v1alpha1/backup_types.go
@@ -35,7 +35,12 @@ type BackupSpec struct {
     Image string `json:"image"`
 
     // HostName represents the host for which to take backup
-    HostName string `json:"hostname"`
+    // If empty, the leader host is used.
+    HostName string `json:"hostname,omitempty"`
+
+    // Represents the address of the NFS server to back up to
+    // +optional
+    BackupToNFS string `json:"BackupToNFS,omitempty"`
 
     // Cluster represents the cluster name to backup
     ClusterName string `json:"clustname"`
diff --git a/api/v1alpha1/cluster_types.go b/api/v1alpha1/cluster_types.go
index d63460e84..dcc4e325c 100644
--- a/api/v1alpha1/cluster_types.go
+++ b/api/v1alpha1/cluster_types.go
@@ -81,6 +81,10 @@ type ClusterSpec struct {
     // Represents the name of the cluster restore from backup path
     // +optional
     RestoreFrom string `json:"restoreFrom,omitempty"`
+
+    // Represents the address of the NFS server to restore from
+    // +optional
+    RestoreFromNFS string `json:"restoreFromNFS,omitempty"`
 }
 
 // MysqlOpts defines the options of MySQL container.
diff --git a/backup/backup.go b/backup/backup.go
index bb11f8e70..66fcd8e47 100644
--- a/backup/backup.go
+++ b/backup/backup.go
@@ -49,7 +49,11 @@ func (b *Backup) GetNameForJob() string {
     return fmt.Sprintf("%s-backup", b.Name)
 }
 
-// Create the backup Domain Name.
+// Create the backup URL: the host's DNS name, or the leader DNS name when hostname is empty.
 func (b *Backup) GetBackupURL(cluster_name string, hostname string) string {
-    return fmt.Sprintf("%s.%s-mysql.%s:%v", hostname, cluster_name, b.Namespace, utils.XBackupPort)
+    if len(hostname) != 0 {
+        return fmt.Sprintf("%s.%s-mysql.%s:%v", hostname, cluster_name, b.Namespace, utils.XBackupPort)
+    } else {
+        return fmt.Sprintf("%s-leader.%s:%v", cluster_name, b.Namespace, utils.XBackupPort)
+    }
 }
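For reference, these are the two URL shapes `GetBackupURL` can now produce. A minimal, self-contained sketch; the cluster, namespace, and pod names are illustrative, and the port constant mirrors `utils.XBackupPort`:

```go
package main

import "fmt"

const xBackupPort = 8082 // mirrors utils.XBackupPort

// backupURL reproduces the GetBackupURL branching for illustration only.
func backupURL(clusterName, namespace, hostname string) string {
	if hostname != "" {
		// Per-pod DNS name behind the headless service.
		return fmt.Sprintf("%s.%s-mysql.%s:%d", hostname, clusterName, namespace, xBackupPort)
	}
	// Leader service DNS name when no host is pinned.
	return fmt.Sprintf("%s-leader.%s:%d", clusterName, namespace, xBackupPort)
}

func main() {
	fmt.Println(backupURL("sample", "default", "sample-mysql-0")) // sample-mysql-0.sample-mysql.default:8082
	fmt.Println(backupURL("sample", "default", ""))               // sample-leader.default:8082
}
```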
diff --git a/backup/syncer/job.go b/backup/syncer/job.go
index 366e1c623..61a2dc7a3 100644
--- a/backup/syncer/job.go
+++ b/backup/syncer/job.go
@@ -115,9 +115,40 @@ func (s *jobSyncer) ensurePodSpec(in corev1.PodSpec) corev1.PodSpec {
     sctName := fmt.Sprintf("%s-secret", s.backup.Spec.ClusterName)
     in.Containers[0].Name = utils.ContainerBackupName
     in.Containers[0].Image = s.backup.Spec.Image
-    in.Containers[0].Args = []string{
-        "request_a_backup",
-        s.backup.GetBackupURL(s.backup.Spec.ClusterName, s.backup.Spec.HostName),
+
+    if len(s.backup.Spec.BackupToNFS) != 0 {
+        // mount the NFS export as the backup volume
+        in.Volumes = []corev1.Volume{
+            {
+                Name: utils.XtrabackupPV,
+                VolumeSource: corev1.VolumeSource{
+                    NFS: &corev1.NFSVolumeSource{
+                        Server: s.backup.Spec.BackupToNFS,
+                        Path:   "/",
+                    },
+                },
+            },
+        }
+        // e.g. "mkdir -p /backup/<dir>;curl --user sys_backups:sys_backups sample-mysql-0.sample-mysql.default:8082/download|xbstream -x -C /backup/<dir>"
+        in.Containers[0].Command = []string{
+            "/bin/bash", "-c", "--",
+        }
+        backupToDir := utils.BuildBackupName()
+        in.Containers[0].Args = []string{
+            fmt.Sprintf("mkdir -p /backup/%s;curl --user $BACKUP_USER:$BACKUP_PASSWORD %s/download|xbstream -x -C /backup/%s; exit ${PIPESTATUS[0]}",
+                backupToDir, s.backup.GetBackupURL(s.backup.Spec.ClusterName, s.backup.Spec.HostName), backupToDir),
+        }
+        in.Containers[0].VolumeMounts = []corev1.VolumeMount{
+            {
+                Name:      utils.XtrabackupPV,
+                MountPath: utils.XtrabackupLocal,
+            },
+        }
+    } else {
+        in.Containers[0].Args = []string{
+            "request_a_backup",
+            s.backup.GetBackupURL(s.backup.Spec.ClusterName, s.backup.Spec.HostName),
+        }
     }
     var optTrue bool = true
     in.Containers[0].Env = []corev1.EnvVar{
diff --git a/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml b/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml
index 294e73019..c2564e260 100644
--- a/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml
+++ b/charts/mysql-operator/crds/mysql.radondb.com_backups.yaml
@@ -36,6 +36,9 @@ spec:
           spec:
             description: BackupSpec defines the desired state of Backup
             properties:
+              BackupToNFS:
+                description: Represents the address of the NFS server to back up to
+                type: string
               clustname:
                 description: Cluster represents the cluster name to backup
                 type: string
@@ -46,6 +49,7 @@ spec:
                 type: integer
               hostname:
                 description: HostName represents the host for which to take backup
+                  If empty, the leader host is used.
                 type: string
               image:
                 default: radondb/mysql-sidecar:latest
@@ -53,7 +57,6 @@ spec:
                 type: string
             required:
             - clustname
-            - hostname
             type: object
           status:
             description: BackupStatus defines the observed state of Backup
diff --git a/charts/mysql-operator/crds/mysql.radondb.com_clusters.yaml b/charts/mysql-operator/crds/mysql.radondb.com_clusters.yaml
index 37aae4e1d..13e94f4e0 100644
--- a/charts/mysql-operator/crds/mysql.radondb.com_clusters.yaml
+++ b/charts/mysql-operator/crds/mysql.radondb.com_clusters.yaml
@@ -1243,6 +1243,9 @@ spec:
               restoreFrom:
                 description: Represents the name of the cluster restore from backup
                   path
                 type: string
+              restoreFromNFS:
+                description: Represents the address of the NFS server to restore from
+                type: string
               xenonOpts:
                 default:
                   admitDefeatHearbeatCount: 5
diff --git a/cluster/cluster.go b/cluster/cluster.go
index 30a4927dc..9abf66975 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -211,7 +211,18 @@ func (c *Cluster) EnsureVolumes() []corev1.Volume {
             },
         },
     )
-
+    // add the NFS volume when restoring from an NFS server
+    if len(c.Spec.RestoreFromNFS) != 0 {
+        volumes = append(volumes, corev1.Volume{
+            Name: utils.XtrabackupPV,
+            VolumeSource: corev1.VolumeSource{
+                NFS: &corev1.NFSVolumeSource{
+                    Server: c.Spec.RestoreFromNFS,
+                    Path:   "/",
+                },
+            },
+        })
+    }
     return volumes
 }
diff --git a/cluster/container/init_sidecar.go b/cluster/container/init_sidecar.go
index 99e8f2edb..cd488e6ee 100644
--- a/cluster/container/init_sidecar.go
+++ b/cluster/container/init_sidecar.go
@@ -99,7 +99,10 @@ func (c *initSidecar) getEnvVars() []corev1.EnvVar {
             Name:  "RESTORE_FROM",
             Value: c.Spec.RestoreFrom,
         },
-
+        {
+            Name:  "CLUSTER_NAME",
+            Value: c.Name,
+        },
         getEnvVarFromSecret(sctName, "MYSQL_ROOT_PASSWORD", "root-password", false),
         getEnvVarFromSecret(sctName, "MYSQL_DATABASE", "mysql-database", true),
         getEnvVarFromSecret(sctName, "MYSQL_USER", "mysql-user", true),
@@ -124,7 +127,12 @@ func (c *initSidecar) getEnvVars() []corev1.EnvVar {
             getEnvVarFromSecret(sctNamebackup, "S3_BUCKET", "s3-bucket", true),
         )
     }
-
+    if len(c.Spec.RestoreFromNFS) != 0 {
+        envs = append(envs, corev1.EnvVar{
+            Name:  "RESTORE_FROM_NFS",
+            Value: c.Spec.RestoreFromNFS,
+        })
+    }
     if c.Spec.MysqlOpts.InitTokuDB {
         envs = append(envs, corev1.EnvVar{
             Name: "INIT_TOKUDB",
@@ -194,6 +202,15 @@ func (c *initSidecar) getVolumeMounts() []corev1.VolumeMount {
         )
     }
 
+    if len(c.Spec.RestoreFromNFS) != 0 {
+        volumeMounts = append(volumeMounts,
+            corev1.VolumeMount{
+                Name:      utils.XtrabackupPV,
+                MountPath: utils.XtrabackupLocal,
+            },
+        )
+    }
+
     if c.Spec.Persistence.Enabled {
         volumeMounts = append(volumeMounts,
             corev1.VolumeMount{
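The NFS branch of the backup Job streams the archive with a shell pipeline instead of calling the sidecar's `request_a_backup` path. A self-contained sketch of the command assembly, and why `exit ${PIPESTATUS[0]}` matters (the URL and directory name are placeholders; credentials come from env vars injected from the cluster secret):

```go
package main

import "fmt"

// buildStreamCmd assembles the same shell one-liner the backup Job runs.
func buildStreamCmd(url, backupDir string) []string {
	// A bash pipeline exits with the LAST command's status (xbstream's), so a
	// failed curl download would otherwise look like success; ${PIPESTATUS[0]}
	// recovers curl's exit code and fails the Job when the download fails.
	script := fmt.Sprintf(
		"mkdir -p /backup/%s;curl --user $BACKUP_USER:$BACKUP_PASSWORD %s/download|xbstream -x -C /backup/%s; exit ${PIPESTATUS[0]}",
		backupDir, url, backupDir)
	return []string{"/bin/bash", "-c", "--", script}
}

func main() {
	fmt.Println(buildStreamCmd("sample-leader.default:8082", "backup_202196152239"))
}
```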
diff --git a/cluster/syncer/follower_service.go b/cluster/syncer/follower_service.go
index fa77b84f1..e2b79d0da 100644
--- a/cluster/syncer/follower_service.go
+++ b/cluster/syncer/follower_service.go
@@ -46,13 +46,17 @@ func NewFollowerSVCSyncer(cli client.Client, c *cluster.Cluster) syncer.Interface {
         service.Spec.Selector["role"] = "follower"
         service.Spec.Selector["healthy"] = "yes"
 
-        if len(service.Spec.Ports) != 1 {
-            service.Spec.Ports = make([]corev1.ServicePort, 1)
+        if len(service.Spec.Ports) != 2 {
+            service.Spec.Ports = make([]corev1.ServicePort, 2)
         }
         service.Spec.Ports[0].Name = utils.MysqlPortName
         service.Spec.Ports[0].Port = utils.MysqlPort
         service.Spec.Ports[0].TargetPort = intstr.FromInt(utils.MysqlPort)
+        // xtrabackup streaming port
+        service.Spec.Ports[1].Name = utils.XBackupPortName
+        service.Spec.Ports[1].Port = utils.XBackupPort
+        service.Spec.Ports[1].TargetPort = intstr.FromInt(utils.XBackupPort)
         return nil
     })
 }
diff --git a/cluster/syncer/leader_service.go b/cluster/syncer/leader_service.go
index 31abb73c3..76a7ed8b3 100644
--- a/cluster/syncer/leader_service.go
+++ b/cluster/syncer/leader_service.go
@@ -45,13 +45,18 @@ func NewLeaderSVCSyncer(cli client.Client, c *cluster.Cluster) syncer.Interface {
         service.Spec.Selector = c.GetSelectorLabels()
         service.Spec.Selector["role"] = "leader"
 
-        if len(service.Spec.Ports) != 1 {
-            service.Spec.Ports = make([]corev1.ServicePort, 1)
+        if len(service.Spec.Ports) != 2 {
+            service.Spec.Ports = make([]corev1.ServicePort, 2)
         }
         service.Spec.Ports[0].Name = utils.MysqlPortName
         service.Spec.Ports[0].Port = utils.MysqlPort
         service.Spec.Ports[0].TargetPort = intstr.FromInt(utils.MysqlPort)
+
+        // xtrabackup streaming port
+        service.Spec.Ports[1].Name = utils.XBackupPortName
+        service.Spec.Ports[1].Port = utils.XBackupPort
+        service.Spec.Ports[1].TargetPort = intstr.FromInt(utils.XBackupPort)
         return nil
     })
 }
diff --git a/config/crd/bases/mysql.radondb.com_backups.yaml b/config/crd/bases/mysql.radondb.com_backups.yaml
index 294e73019..c2564e260 100644
--- a/config/crd/bases/mysql.radondb.com_backups.yaml
+++ b/config/crd/bases/mysql.radondb.com_backups.yaml
@@ -36,6 +36,9 @@ spec:
           spec:
             description: BackupSpec defines the desired state of Backup
             properties:
+              BackupToNFS:
+                description: Represents the address of the NFS server to back up to
+                type: string
               clustname:
                 description: Cluster represents the cluster name to backup
                 type: string
@@ -46,6 +49,7 @@ spec:
                 type: integer
               hostname:
                 description: HostName represents the host for which to take backup
+                  If empty, the leader host is used.
                 type: string
               image:
                 default: radondb/mysql-sidecar:latest
@@ -53,7 +57,6 @@ spec:
                 type: string
             required:
             - clustname
-            - hostname
             type: object
           status:
             description: BackupStatus defines the observed state of Backup
diff --git a/config/crd/bases/mysql.radondb.com_clusters.yaml b/config/crd/bases/mysql.radondb.com_clusters.yaml
index 37aae4e1d..13e94f4e0 100644
--- a/config/crd/bases/mysql.radondb.com_clusters.yaml
+++ b/config/crd/bases/mysql.radondb.com_clusters.yaml
@@ -1243,6 +1243,9 @@ spec:
               restoreFrom:
                 description: Represents the name of the cluster restore from backup
                   path
                 type: string
+              restoreFromNFS:
+                description: Represents the address of the NFS server to restore from
+                type: string
               xenonOpts:
                 default:
                   admitDefeatHearbeatCount: 5
diff --git a/config/samples/backup_secret.yaml b/config/samples/backup_secret.yaml
index 93fa9e2e7..47e86e6e6 100644
--- a/config/samples/backup_secret.yaml
+++ b/config/samples/backup_secret.yaml
@@ -1,5 +1,5 @@
 kind: Secret
-apiVersion: v1
+apiVersion: mysql.radondb.com/v1alpha1
 metadata:
   name: sample-backup-secret
   namespace: default
diff --git a/config/samples/mysql_v1alpha1_backup.yaml b/config/samples/mysql_v1alpha1_backup.yaml
index 7fd5a66e4..bef50f783 100644
--- a/config/samples/mysql_v1alpha1_backup.yaml
+++ b/config/samples/mysql_v1alpha1_backup.yaml
@@ -5,5 +5,8 @@ metadata:
 spec:
   # Add fields here
   image: radondb/mysql-sidecar:latest
+  # if hostname is empty, the leader is used as the backup host
   hostname: sample-mysql-0
   clustname: sample
+  # BackupToNFS: "IP of NFS server"
+
\ No newline at end of file
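The leader and follower Services above now expose the xtrabackup port next to MySQL, which is what lets a backup Job reach `<cluster>-leader:8082/download` when no hostname is pinned. A minimal standalone sketch of the reconciliation pattern those syncers use (port names and numbers mirror the `utils` constants; resizing once and then assigning by index keeps repeated reconciliations idempotent):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// ensurePorts mirrors the syncer mutation applied on every reconcile.
func ensurePorts(svc *corev1.Service) {
	// Rebuild the slice only when its shape is wrong; otherwise just
	// overwrite fields so unrelated changes are not clobbered.
	if len(svc.Spec.Ports) != 2 {
		svc.Spec.Ports = make([]corev1.ServicePort, 2)
	}
	svc.Spec.Ports[0].Name = "mysql"
	svc.Spec.Ports[0].Port = 3306
	svc.Spec.Ports[0].TargetPort = intstr.FromInt(3306)
	svc.Spec.Ports[1].Name = "xtrabackup"
	svc.Spec.Ports[1].Port = 8082
	svc.Spec.Ports[1].TargetPort = intstr.FromInt(8082)
}

func main() {
	svc := &corev1.Service{}
	ensurePorts(svc)
	fmt.Printf("%+v\n", svc.Spec.Ports)
}
```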
diff --git a/config/samples/mysql_v1alpha1_cluster.yaml b/config/samples/mysql_v1alpha1_cluster.yaml
index 03d6c5d9d..81f4caff4 100644
--- a/config/samples/mysql_v1alpha1_cluster.yaml
+++ b/config/samples/mysql_v1alpha1_cluster.yaml
@@ -10,9 +10,12 @@ spec:
 
   # if you want S3 backup or restore, please create backup_secret.yaml, uncomment below and fill secret name:
   # backupSecretName:
-  # if you want create cluster from S3 , uncomment and fill the directory in S3 bucket below:
+  # to create a cluster from S3 or from NFS, uncomment below and fill in
+  # the directory in the S3 bucket or on the NFS server:
   # restoreFrom:
 
+  # to restore from NFS, uncomment below and set the IP of the NFS server:
+  # restoreFromNFS:
   mysqlOpts:
     rootPassword: ""
     rootHost: localhost
diff --git a/config/samples/nfs_rc.yaml b/config/samples/nfs_rc.yaml
new file mode 100644
index 000000000..40434327e
--- /dev/null
+++ b/config/samples/nfs_rc.yaml
@@ -0,0 +1,33 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nfs-server
+spec:
+  replicas: 1
+  selector:
+    role: nfs-server
+  template:
+    metadata:
+      labels:
+        role: nfs-server
+    spec:
+      containers:
+      - name: nfs-server
+        image: gcr.azk8s.cn/google_containers/volume-nfs:0.8
+        ports:
+        - name: nfs
+          containerPort: 2049
+        - name: mountd
+          containerPort: 20048
+        - name: rpcbind
+          containerPort: 111
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /exports
+          name: nfs-export-fast
+      volumes:
+      - name: nfs-export-fast
+        persistentVolumeClaim:
+          claimName: neosan1
+
diff --git a/config/samples/nfs_server.yaml b/config/samples/nfs_server.yaml
new file mode 100644
index 000000000..9654d1583
--- /dev/null
+++ b/config/samples/nfs_server.yaml
@@ -0,0 +1,14 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: nfs-server
+spec:
+  ports:
+    - name: nfs
+      port: 2049
+    - name: mountd
+      port: 20048
+    - name: rpcbind
+      port: 111
+  selector:
+    role: nfs-server
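Before pointing `BackupToNFS` or `restoreFromNFS` at the new service, it can help to confirm the NFS port is reachable from inside the cluster. A minimal TCP probe, assuming the `nfs-server` Service above lives in the `default` namespace (the address and port 2049 match the manifests, but this probe is only an illustration, not part of the operator):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Cluster DNS name of the nfs-server Service defined above (assumed namespace "default").
	addr := "nfs-server.default.svc:2049"
	conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
	if err != nil {
		fmt.Println("NFS port unreachable:", err)
		return
	}
	defer conn.Close()
	fmt.Println("NFS port reachable at", addr)
}
```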
+ restoreFrom: "backup_202196152239" + restoreFromNFS : 10.96.253.82 + ``` + `backup_202196152239` is the nfs server backup path, change it to yours. + the `10.96.253.82` is the NFS server ip, change it to yours. + use command as follow to create cluster from NFS server backup copy: + + ## build your own image + such as : + ``` + docker build -f Dockerfile.sidecar -t acekingke/sidecar:0.1 . && docker push acekingke/sidecar:0.1 + docker build -t acekingke/controller:0.1 . && docker push acekingke/controller:0.1 + ``` + you can replace acekingke/sidecar:0.1 with your own tag + + ## deploy your own manager +```shell +make manifests +make install +make deploy IMG=acekingke/controller:0.1 KUSTOMIZE=~/radondb-mysql-kubernetes/bin/kustomize +``` diff --git a/docs/deploy_backup_restore_s3.md b/docs/deploy_backup_restore_s3.md index 11fbcd8cc..6c468b5bb 100644 --- a/docs/deploy_backup_restore_s3.md +++ b/docs/deploy_backup_restore_s3.md @@ -1,6 +1,6 @@ # mysql-operator -## Quickstart for backup +## Quickstart for backup S3 Install the operator named `test`: @@ -8,7 +8,7 @@ Install the operator named `test`: helm install test charts/mysql-operator ``` -### configure backup +### configure backup for S3 add the secret file ```yaml @@ -110,6 +110,47 @@ kubectl apply -f config/samples/mysql_v1alpha1_cluster.yaml ``` could restore a cluster from the `backup_2021720827 ` copy in the S3 bucket. +if you want backup to NFS server or restore from NFS server, do it as follow: + +## Quickstart for NFS backup + +### Create NFS server +first create your PVC, such as "neosan1", fill it to `config/samples/nfs_rc.yaml ` +```yaml +... + volumes: + - name: nfs-export-fast + persistentVolumeClaim: + claimName: neosan1 +``` + ```shell +# create the nfs pod +kubectl apply -f config/samples/nfs_rc.yaml +# create the nfs service +kubectl apply -f config/samples/nfs_server.yaml + ``` +if create the nfs server successful, get the then: + +## config `mysql_v1alpha1_backup.yaml ` and backup +Add field in `mysql_v1alpha1_backup.yaml ` as follow: +```yaml +BackupToNFS: "IP of NFS server" +``` +use commadn as follow to backup +```shell +kubectl apply -f config/samples/mysql_v1alpha1_backup.yaml +``` + + ## Restore cluster from exist NFS backup + first, configure the `mysql_v1alpha1_cluster.yaml`, uncomment the `restoreFromNFS` field: + ```yaml + .... + restoreFrom: "backup_202196152239" + restoreFromNFS : 10.96.253.82 + ``` + `backup_202196152239` is the nfs server backup path, change it to yours. + the `10.96.253.82` is the NFS server ip, change it to yours. + use command as follow to create cluster from NFS server backup copy: ## build your own image such as : diff --git a/sidecar/config.go b/sidecar/config.go index 8a08c9f30..3960a893c 100644 --- a/sidecar/config.go +++ b/sidecar/config.go @@ -118,6 +118,9 @@ type Config struct { // directory in S3 bucket for cluster restore from XRestoreFrom string + + // NFS server which Restore from + XRestoreFromNFS string } // NewInitConfig returns the configuration file needed for initialization. 
diff --git a/docs/deploy_backup_restore_s3.md b/docs/deploy_backup_restore_s3.md
index 11fbcd8cc..6c468b5bb 100644
--- a/docs/deploy_backup_restore_s3.md
+++ b/docs/deploy_backup_restore_s3.md
@@ -1,6 +1,6 @@
 # mysql-operator
 
-## Quickstart for backup
+## Quickstart for S3 backup
 
 Install the operator named `test`:
 
@@ -8,7 +8,7 @@ Install the operator named `test`:
 helm install test charts/mysql-operator
 ```
 
-### configure backup
+### configure backup for S3
 
 add the secret file
 
@@ -110,6 +110,53 @@ kubectl apply -f config/samples/mysql_v1alpha1_cluster.yaml
 ```
 
 could restore a cluster from the `backup_2021720827 ` copy in the S3 bucket.
 
+To back up to an NFS server or restore from one, proceed as follows:
+
+## Quickstart for NFS backup
+
+### Create the NFS server
+
+First create your PVC, for example "neosan1", and fill it into `config/samples/nfs_rc.yaml`:
+```yaml
+...
+    volumes:
+    - name: nfs-export-fast
+      persistentVolumeClaim:
+        claimName: neosan1
+```
+```shell
+# create the nfs pod
+kubectl apply -f config/samples/nfs_rc.yaml
+# create the nfs service
+kubectl apply -f config/samples/nfs_server.yaml
+```
+If the NFS server comes up successfully, get the IP of the `nfs-server` service, then:
+
+### Configure `mysql_v1alpha1_backup.yaml` and back up
+
+Add the following field to `mysql_v1alpha1_backup.yaml`:
+```yaml
+BackupToNFS: "IP of NFS server"
+```
+Use the following command to take a backup:
+```shell
+kubectl apply -f config/samples/mysql_v1alpha1_backup.yaml
+```
+
+### Restore a cluster from an existing NFS backup
+
+First, configure `mysql_v1alpha1_cluster.yaml` and uncomment the `restoreFromNFS` field:
+```yaml
+....
+restoreFrom: "backup_202196152239"
+restoreFromNFS: 10.96.253.82
+```
+`backup_202196152239` is the backup path on the NFS server; change it to yours.
+`10.96.253.82` is the NFS server IP; change it to yours.
+Use the following command to create a cluster from the NFS backup copy:
+```shell
+kubectl apply -f config/samples/mysql_v1alpha1_cluster.yaml
+```
 
 ## build your own image
diff --git a/sidecar/config.go b/sidecar/config.go
index 8a08c9f30..3960a893c 100644
--- a/sidecar/config.go
+++ b/sidecar/config.go
@@ -118,6 +118,9 @@ type Config struct {
 
     // directory in S3 bucket for cluster restore from
     XRestoreFrom string
+
+    // NFS server to restore from
+    XRestoreFromNFS string
 }
 
 // NewInitConfig returns the configuration file needed for initialization.
@@ -182,6 +185,7 @@ func NewInitConfig() *Config {
         existMySQLData: existMySQLData,
 
         XRestoreFrom:      getEnvValue("RESTORE_FROM"),
+        XRestoreFromNFS:   getEnvValue("RESTORE_FROM_NFS"),
         XCloudS3EndPoint:  getEnvValue("S3_ENDPOINT"),
         XCloudS3AccessKey: getEnvValue("S3_ACCESSKEY"),
         XCloudS3SecretKey: getEnvValue("S3_SECRETKEY"),
@@ -602,3 +606,55 @@ rm -rf /root/backup
     }
     return nil
 }
+
+// buildNFSRestore writes a restore.sh that prepares and copies back an
+// xtrabackup copy from the mounted NFS export.
+func (cfg *Config) buildNFSRestore(path string) error {
+    if len(cfg.XRestoreFrom) == 0 {
+        return fmt.Errorf("restore path is empty, RESTORE_FROM is not set")
+    }
+    if len(cfg.XRestoreFromNFS) == 0 {
+        return fmt.Errorf("NFS server is empty, RESTORE_FROM_NFS is not set")
+    }
+
+    f, err := os.Create(path)
+    if err != nil {
+        return fmt.Errorf("create restore.sh fail: %s", err)
+    }
+    defer func() {
+        f.Close()
+    }()
+    restoresh := `#!/bin/sh
+if [ ! -d {{.DataDir}} ]; then
+    echo "{{.DataDir}} does not exist"
+    mkdir {{.DataDir}}
+    chown -R mysql.mysql {{.DataDir}}
+fi
+rm -rf {{.DataDir}}/*
+xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --apply-log-only --target-dir=/backup/{{.XRestoreFrom}}
+xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --target-dir=/backup/{{.XRestoreFrom}}
+chown -R mysql.mysql /backup/{{.XRestoreFrom}}
+xtrabackup --defaults-file={{.MyCnfMountPath}} --datadir={{.DataDir}} --copy-back --target-dir=/backup/{{.XRestoreFrom}}
+exit_code=$?
+chown -R mysql.mysql {{.DataDir}}
+exit $exit_code
+`
+    template_restore := template.New("restore.sh")
+    template_restore, err = template_restore.Parse(restoresh)
+    if err != nil {
+        return err
+    }
+    err2 := template_restore.Execute(f, struct {
+        Config
+        DataDir        string
+        MyCnfMountPath string
+    }{
+        *cfg,
+        utils.DataVolumeMountPath,
+        utils.ConfVolumeMountPath + "/my.cnf",
+    })
+    if err2 != nil {
+        return err2
+    }
+    return nil
+}
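To see roughly what `buildNFSRestore` emits, here is a trimmed, self-contained rendering of the same `text/template` mechanism to stdout. The mount paths and directory name are stand-ins for `utils.DataVolumeMountPath`, `utils.ConfVolumeMountPath`, and the real `XRestoreFrom` value:

```go
package main

import (
	"os"
	"text/template"
)

// A trimmed version of the restore.sh template: prepare twice, then copy back.
const restoresh = `#!/bin/sh
xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --apply-log-only --target-dir=/backup/{{.XRestoreFrom}}
xtrabackup --defaults-file={{.MyCnfMountPath}} --use-memory=3072M --prepare --target-dir=/backup/{{.XRestoreFrom}}
xtrabackup --defaults-file={{.MyCnfMountPath}} --datadir={{.DataDir}} --copy-back --target-dir=/backup/{{.XRestoreFrom}}
`

func main() {
	tmpl := template.Must(template.New("restore.sh").Parse(restoresh))
	// Stand-in values; the sidecar fills these from its Config and utils constants.
	data := struct {
		XRestoreFrom, DataDir, MyCnfMountPath string
	}{"backup_202196152239", "/var/lib/mysql", "/etc/mysql/my.cnf"}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```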
diff --git a/sidecar/init.go b/sidecar/init.go
index 6bc0d3581..a9b9c6286 100644
--- a/sidecar/init.go
+++ b/sidecar/init.go
@@ -165,7 +165,14 @@ func runInitCommand(cfg *Config) error {
     // run the restore
     if len(cfg.XRestoreFrom) != 0 {
         var restoreName string = "/restore.sh"
-        err_f := cfg.buildS3Restore(restoreName)
+        var err_f error
+        // prefer an NFS restore; otherwise fall back to S3
+        if len(cfg.XRestoreFromNFS) != 0 {
+            err_f = cfg.buildNFSRestore(restoreName)
+        } else {
+            err_f = cfg.buildS3Restore(restoreName)
+        }
+
         if err_f != nil {
             return fmt.Errorf("build restore.sh fail : %s", err_f)
         }
diff --git a/sidecar/server.go b/sidecar/server.go
index 806412a67..29f92486f 100644
--- a/sidecar/server.go
+++ b/sidecar/server.go
@@ -19,8 +19,11 @@ package sidecar
 import (
     "context"
     "fmt"
+    "io"
     "net"
     "net/http"
+    "os"
+    "os/exec"
     "strings"
     "time"
 
@@ -28,10 +31,22 @@ import (
 )
 
 const (
+    // backup status HTTP trailer
+    backupStatusTrailer = "X-Backup-Status"
+
+    // success string
+    backupSuccessful = "Success"
+
+    // failure string
+    backupFailed = "Failed"
+
     serverPort           = utils.XBackupPort
     serverProbeEndpoint  = "/health"
     serverBackupEndpoint = "/xbackup"
     serverConnectTimeout = 5 * time.Second
+
+    // download endpoint for streaming a backup
+    serverBackupDownLoadEndpoint = "/download"
 )
 
 type server struct {
@@ -54,6 +69,9 @@ func newServer(cfg *Config, stop <-chan struct{}) *server {
     mux.HandleFunc(serverProbeEndpoint, srv.healthHandler)
     mux.Handle(serverBackupEndpoint,
         maxClients(http.HandlerFunc(srv.backupHandler), 1))
+    mux.Handle(serverBackupDownLoadEndpoint,
+        maxClients(http.HandlerFunc(srv.backupDownLoadHandler), 1))
+
     // Shutdown gracefully the http server.
     go func() {
         <-stop // wait for stop signal
@@ -79,7 +97,7 @@ func (s *server) backupHandler(w http.ResponseWriter, r *http.Request) {
         http.Error(w, "Not authenticated!", http.StatusForbidden)
         return
     }
-    err := RunTakeBackupCommand(s.cfg, "")
+    err := RunTakeBackupCommand(s.cfg)
     if err != nil {
         http.Error(w, err.Error(), http.StatusInternalServerError)
     } else {
@@ -87,6 +105,64 @@ func (s *server) backupHandler(w http.ResponseWriter, r *http.Request) {
     }
 }
 
+// Download handler: streams an xtrabackup of the local MySQL to the client.
+func (s *server) backupDownLoadHandler(w http.ResponseWriter, r *http.Request) {
+
+    if !s.isAuthenticated(r) {
+        http.Error(w, "Not authenticated!", http.StatusForbidden)
+        return
+    }
+
+    flusher, ok := w.(http.Flusher)
+    if !ok {
+        http.Error(w, "HTTP server does not support streaming!", http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/octet-stream")
+    w.Header().Set("Connection", "keep-alive")
+    w.Header().Set("Trailer", backupStatusTrailer)
+
+    // nolint: gosec
+    xtrabackup := exec.Command(xtrabackupCommand, s.cfg.XtrabackupArgs()...)
+    xtrabackup.Stderr = os.Stderr
+
+    stdout, err := xtrabackup.StdoutPipe()
+    if err != nil {
+        log.Error(err, "failed to create stdout pipe")
+        http.Error(w, "xtrabackup failed", http.StatusInternalServerError)
+        return
+    }
+
+    defer func() {
+        // don't care
+        _ = stdout.Close()
+    }()
+
+    if err := xtrabackup.Start(); err != nil {
+        log.Error(err, "failed to start xtrabackup command")
+        http.Error(w, "xtrabackup failed", http.StatusInternalServerError)
+        return
+    }
+
+    if _, err := io.Copy(w, stdout); err != nil {
+        log.Error(err, "failed to copy buffer")
+        http.Error(w, "buffer copy failed", http.StatusInternalServerError)
+        return
+    }
+
+    if err := xtrabackup.Wait(); err != nil {
+        log.Error(err, "failed waiting for xtrabackup to finish")
+        w.Header().Set(backupStatusTrailer, backupFailed)
+        http.Error(w, "xtrabackup failed", http.StatusInternalServerError)
+        return
+    }
+
+    // success
+    w.Header().Set(backupStatusTrailer, backupSuccessful)
+    flusher.Flush()
+}
+
 func (s *server) isAuthenticated(r *http.Request) bool {
     user, pass, ok := r.BasicAuth()
     return ok && user == s.cfg.BackupUser && pass == s.cfg.BackupPassword
diff --git a/sidecar/takebackup.go b/sidecar/takebackup.go
index 76d796d8f..2490af6e2 100644
--- a/sidecar/takebackup.go
+++ b/sidecar/takebackup.go
@@ -23,7 +23,7 @@ import (
 )
 
 // RunTakeBackupCommand starts a backup command
-func RunTakeBackupCommand(cfg *Config, name string) error {
+func RunTakeBackupCommand(cfg *Config) error {
     // cfg->XtrabackupArgs()
     xtrabackup := exec.Command(xtrabackupCommand, cfg.XtrabackupArgs()...)
diff --git a/utils/constants.go b/utils/constants.go
index d20a87012..995b8a5c1 100644
--- a/utils/constants.go
+++ b/utils/constants.go
@@ -46,8 +46,12 @@ const (
     ContainerBackupName    = "backup"
     ContainerBackupJobName = "backup-job"
 
+    // xtrabackup streaming port and backup volume
     XBackupPortName = "xtrabackup"
     XBackupPort     = 8082
+    XtrabackupPV    = "backup"
+    XtrabackupLocal = "/backup"
+
     // MySQL port.
     MysqlPortName = "mysql"
     MysqlPort     = 3306
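Finally, a sketch of a client for the new `/download` endpoint, mirroring what the backup Job's curl invocation does and additionally checking the `X-Backup-Status` trailer that the handler above sets. The host, credentials, and output path are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	req, err := http.NewRequest("GET", "http://sample-leader.default:8082/download", nil)
	if err != nil {
		panic(err)
	}
	// Same credentials the Job injects as $BACKUP_USER / $BACKUP_PASSWORD.
	req.SetBasicAuth(os.Getenv("BACKUP_USER"), os.Getenv("BACKUP_PASSWORD"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Stream the xbstream archive to disk (the Job pipes it into `xbstream -x` instead).
	out, err := os.Create("backup.xbstream")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}

	// HTTP trailers are populated only after the body has been fully read.
	if st := resp.Trailer.Get("X-Backup-Status"); st != "Success" {
		fmt.Println("backup reported failure:", st)
		os.Exit(1)
	}
	fmt.Println("backup streamed successfully")
}
```

Because the body is streamed before xtrabackup exits, a mid-stream failure cannot change the HTTP status code; the trailer is the only reliable success signal, which is why the client must check it after draining the body.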