From ed107922daec542260a00549cbe745690bc055ae Mon Sep 17 00:00:00 2001 From: acekingke Date: Wed, 6 Jul 2022 12:48:48 +0800 Subject: [PATCH] feat(singleNode): Support running single node. #361 --- api/v1alpha1/mysqlcluster_types.go | 2 +- .../crds/mysql.radondb.com_mysqlclusters.yaml | 1 + .../mysql.radondb.com_mysqlclusters.yaml | 1 + mysqlcluster/mysqlcluster.go | 5 +++- mysqlcluster/syncer/statefulset.go | 8 ++++++- mysqlcluster/syncer/status.go | 24 +++++++++++++------ 6 files changed, 31 insertions(+), 10 deletions(-) diff --git a/api/v1alpha1/mysqlcluster_types.go b/api/v1alpha1/mysqlcluster_types.go index 601be304a..998617a55 100644 --- a/api/v1alpha1/mysqlcluster_types.go +++ b/api/v1alpha1/mysqlcluster_types.go @@ -31,7 +31,7 @@ type MysqlClusterSpec struct { // Replicas is the number of pods. // +optional - // +kubebuilder:validation:Enum=0;2;3;5 + // +kubebuilder:validation:Enum=0;1;2;3;5 // +kubebuilder:default:=3 Replicas *int32 `json:"replicas,omitempty"` diff --git a/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml b/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml index 873e38134..2236b198d 100644 --- a/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml +++ b/charts/mysql-operator/crds/mysql.radondb.com_mysqlclusters.yaml @@ -1255,6 +1255,7 @@ spec: description: Replicas is the number of pods. enum: - 0 + - 1 - 2 - 3 - 5 diff --git a/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml b/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml index 873e38134..2236b198d 100644 --- a/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml +++ b/config/crd/bases/mysql.radondb.com_mysqlclusters.yaml @@ -1255,6 +1255,7 @@ spec: description: Replicas is the number of pods. 
enum: - 0 + - 1 - 2 - 3 - 5 diff --git a/mysqlcluster/mysqlcluster.go b/mysqlcluster/mysqlcluster.go index b1d8bcf67..93bc7f1f4 100644 --- a/mysqlcluster/mysqlcluster.go +++ b/mysqlcluster/mysqlcluster.go @@ -363,7 +363,10 @@ func (c *MysqlCluster) EnsureMysqlConf() { defaultSize = uint64(0.6 * float64(mem)) maxSize = uint64(0.8 * float64(mem)) } - + // A single node has no followers, so it must not run in read-only mode. + if *c.Spec.Replicas == 1 { + c.Spec.MysqlOpts.MysqlConf["read_only"] = "OFF" + } conf, ok := c.Spec.MysqlOpts.MysqlConf["innodb_buffer_pool_size"] if !ok { innodbBufferPoolSize = utils.Max(defaultSize, innodbBufferPoolSize) diff --git a/mysqlcluster/syncer/statefulset.go b/mysqlcluster/syncer/statefulset.go index 5f5215065..f82562564 100644 --- a/mysqlcluster/syncer/statefulset.go +++ b/mysqlcluster/syncer/statefulset.go @@ -427,7 +427,13 @@ func (s *StatefulSetSyncer) ensurePodSpec() corev1.PodSpec { mysql := container.EnsureContainer(utils.ContainerMysqlName, s.MysqlCluster) xenon := container.EnsureContainer(utils.ContainerXenonName, s.MysqlCluster) backup := container.EnsureContainer(utils.ContainerBackupName, s.MysqlCluster) - containers := []corev1.Container{mysql, xenon, backup} + var containers []corev1.Container + if *s.Spec.Replicas == 1 { + containers = []corev1.Container{mysql, backup} + } else { + containers = []corev1.Container{mysql, xenon, backup} + } + if s.Spec.MetricsOpts.Enabled { containers = append(containers, container.EnsureContainer(utils.ContainerMetricsName, s.MysqlCluster)) } diff --git a/mysqlcluster/syncer/status.go b/mysqlcluster/syncer/status.go index f52f37019..19236b2a2 100644 --- a/mysqlcluster/syncer/status.go +++ b/mysqlcluster/syncer/status.go @@ -287,6 +287,7 @@ func (s *StatusSyncer) updateNodeStatus(ctx context.Context, cli client.Client, if !utils.ExistUpdateFile() && node.RaftStatus.Role == string(utils.Leader) && + *s.Spec.Replicas != 1 && isReadOnly != corev1.ConditionFalse { s.log.V(1).Info("try to correct the leader writeable", "node",
node.Name) sqlRunner.QueryExec(internal.NewQuery("SET GLOBAL read_only=off")) @@ -371,15 +372,20 @@ func (s *StatusSyncer) updateNodeRaftStatus(node *apiv1alpha1.NodeStatus) error Leader: "UNKNOWN", Nodes: nil, } - raftStatus, err := s.XenonExecutor.RaftStatus(node.Name) - if err == nil && raftStatus != nil { - node.RaftStatus = *raftStatus - if raftStatus.Role == string(utils.Leader) { - isLeader = corev1.ConditionTrue + var err error = nil + if *s.Spec.Replicas != 1 { + var raftStatus *apiv1alpha1.RaftStatus + raftStatus, err = s.XenonExecutor.RaftStatus(node.Name) + if err == nil && raftStatus != nil { + node.RaftStatus = *raftStatus + if raftStatus.Role == string(utils.Leader) { + isLeader = corev1.ConditionTrue + } } + } else { + node.RaftStatus.Role = string(utils.Leader) + isLeader = corev1.ConditionTrue } - // update apiv1alpha1.NodeConditionLeader. s.updateNodeCondition(node, int(apiv1alpha1.IndexLeader), isLeader) return err @@ -448,6 +454,10 @@ func (s *StatusSyncer) updatePodLabel(ctx context.Context, pod *corev1.Pod, node healthy = "yes" } } + // A single node has no replication, so skip the slave-status check and treat it as healthy. + if *s.Spec.Replicas == 1 { + healthy = "yes" + } if pod.DeletionTimestamp != nil || pod.Status.Phase != corev1.PodRunning { healthy = "no" node.RaftStatus.Role = string(utils.Unknown)