Skip to content

Commit

Permalink
feat(singleNode): Support running single node. radondb#361
Browse files Browse the repository at this point in the history
  • Loading branch information
acekingke committed Jul 8, 2022
1 parent ed0e763 commit e5d2537
Show file tree
Hide file tree
Showing 5 changed files with 27 additions and 9 deletions.
2 changes: 1 addition & 1 deletion api/v1alpha1/mysqlcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ type MysqlClusterSpec struct {

// Replicas is the number of pods.
// +optional
// +kubebuilder:validation:Enum=0;2;3;5
// +kubebuilder:validation:Enum=0;1;2;3;5
// +kubebuilder:default:=3
Replicas *int32 `json:"replicas,omitempty"`

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1255,6 +1255,7 @@ spec:
description: Replicas is the number of pods.
enum:
- 0
- 1
- 2
- 3
- 5
Expand Down
1 change: 1 addition & 0 deletions config/crd/bases/mysql.radondb.com_mysqlclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1255,6 +1255,7 @@ spec:
description: Replicas is the number of pods.
enum:
- 0
- 1
- 2
- 3
- 5
Expand Down
8 changes: 7 additions & 1 deletion mysqlcluster/syncer/statefulset.go
Original file line number Diff line number Diff line change
Expand Up @@ -427,7 +427,13 @@ func (s *StatefulSetSyncer) ensurePodSpec() corev1.PodSpec {
mysql := container.EnsureContainer(utils.ContainerMysqlName, s.MysqlCluster)
xenon := container.EnsureContainer(utils.ContainerXenonName, s.MysqlCluster)
backup := container.EnsureContainer(utils.ContainerBackupName, s.MysqlCluster)
containers := []corev1.Container{mysql, xenon, backup}
var containers []corev1.Container
if *s.Spec.Replicas == 1 {
containers = []corev1.Container{mysql, backup}
} else {
containers = []corev1.Container{mysql, xenon, backup}
}

if s.Spec.MetricsOpts.Enabled {
containers = append(containers, container.EnsureContainer(utils.ContainerMetricsName, s.MysqlCluster))
}
Expand Down
24 changes: 17 additions & 7 deletions mysqlcluster/syncer/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -287,6 +287,7 @@ func (s *StatusSyncer) updateNodeStatus(ctx context.Context, cli client.Client,

if !utils.ExistUpdateFile() &&
node.RaftStatus.Role == string(utils.Leader) &&
*s.Spec.Replicas != 1 &&
isReadOnly != corev1.ConditionFalse {
s.log.V(1).Info("try to correct the leader writeable", "node", node.Name)
sqlRunner.QueryExec(internal.NewQuery("SET GLOBAL read_only=off"))
Expand Down Expand Up @@ -371,15 +372,20 @@ func (s *StatusSyncer) updateNodeRaftStatus(node *apiv1alpha1.NodeStatus) error
Leader: "UNKNOWN",
Nodes: nil,
}

raftStatus, err := s.XenonExecutor.RaftStatus(node.Name)
if err == nil && raftStatus != nil {
node.RaftStatus = *raftStatus
if raftStatus.Role == string(utils.Leader) {
isLeader = corev1.ConditionTrue
var err error = nil
if *s.Spec.Replicas != 1 {
var raftStatus *apiv1alpha1.RaftStatus
raftStatus, err = s.XenonExecutor.RaftStatus(node.Name)
if err == nil && raftStatus != nil {
node.RaftStatus = *raftStatus
if raftStatus.Role == string(utils.Leader) {
isLeader = corev1.ConditionTrue
}
}
} else {
node.RaftStatus.Role = string(utils.Leader)
isLeader = corev1.ConditionTrue
}

// update apiv1alpha1.NodeConditionLeader.
s.updateNodeCondition(node, int(apiv1alpha1.IndexLeader), isLeader)
return err
Expand Down Expand Up @@ -448,6 +454,10 @@ func (s *StatusSyncer) updatePodLabel(ctx context.Context, pod *corev1.Pod, node
healthy = "yes"
}
}
// NOTE(review): comment and condition disagree — the text says a single-replica
// cluster skips the slave-status check, but the guard below is
// `*s.Spec.Replicas != 1`, which forces healthy = "yes" for every multi-replica
// pod and overrides the slave-status check performed above. The condition
// should most likely be `== 1` (single node has no slave to check) — confirm
// against the upstream fix before merging.
if *s.Spec.Replicas != 1 {
healthy = "yes"
}
if pod.DeletionTimestamp != nil || pod.Status.Phase != corev1.PodRunning {
healthy = "no"
node.RaftStatus.Role = string(utils.Unknown)
Expand Down

0 comments on commit e5d2537

Please sign in to comment.