feat: support arm arch && new version cmd (#2546)
xiaohan1202 authored Feb 10, 2023
1 parent 4192033 commit d68fa93
Showing 9 changed files with 74 additions and 147 deletions.
8 changes: 8 additions & 0 deletions controllers/cluster/config/rbac/role.yaml
@@ -43,3 +43,11 @@ rules:
- patch
- update
- watch
- apiGroups:
- infra.sealos.io
resources:
- infras/status
verbs:
- get
- patch
- update
75 changes: 43 additions & 32 deletions controllers/cluster/controllers/cluster_controller.go
@@ -18,14 +18,12 @@ package controllers

import (
"context"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"strings"
"time"

version2 "github.com/labring/sealos/pkg/version"

"github.com/labring/sealos/controllers/cluster/utils"

"github.com/go-logr/logr"
@@ -52,15 +50,15 @@ import (
const (
defaultUser = "root"
defaultSealosVersion = "4.1.4"
defaultSealosArch = "amd64"
defaultWorkDir = "/root/.sealos"
defaultClusterName = "default"
defaultClusterFileName = "Clusterfile"
)
const (
applyClusterfileCmd = "sealos apply -f /root/Clusterfile"
preDownload = `sealos version || wget https://ghproxy.com/https://github.com/labring/sealos/releases/download/v%[1]s/sealos_%[1]s_linux_amd64.tar.gz && tar -zxvf sealos_%[1]s_linux_amd64.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin`
sealosVersionCmd = "sealos version"
downloadSealosCmd = `wget https://ghproxy.com/https://github.com/labring/sealos/releases/download/v%[1]s/sealos_%[1]s_linux_amd64.tar.gz && tar -zxvf sealos_%[1]s_linux_amd64.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin`
sealosVersionCmd = "sealos version --short"
downloadSealosCmd = `wget https://ghproxy.com/https://github.com/labring/sealos/releases/download/v%[1]s/sealos_%[1]s_linux_%[2]s.tar.gz && tar -zxvf sealos_%[1]s_linux_%[2]s.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin`
getClusterfileCmd = "cat %s"
)
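Note for reviewers: `%[1]s` and `%[2]s` are indexed `fmt` verbs, so the single version argument fills every occurrence in the template while the second argument supplies the architecture. A minimal sketch of the expected expansion (the constant is shortened here; the URL is illustrative only):

```go
package main

import "fmt"

// Shortened, illustrative copy of downloadSealosCmd; the real constant
// also untars the archive and moves the binary to /usr/bin.
const downloadTmpl = `wget .../v%[1]s/sealos_%[1]s_linux_%[2]s.tar.gz`

func main() {
	// %[1]s is replaced by the version everywhere it appears,
	// %[2]s by the target architecture.
	cmd := fmt.Sprintf(downloadTmpl, "4.1.4", "arm64")
	fmt.Println(cmd) // wget .../v4.1.4/sealos_4.1.4_linux_arm64.tar.gz
}
```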

@@ -79,6 +77,7 @@ type ClusterReconciler struct {
//+kubebuilder:rbac:groups=cluster.sealos.io,resources=clusters/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=cluster.sealos.io,resources=clusters/finalizers,verbs=update
//+kubebuilder:rbac:groups=infra.sealos.io,resources=infras,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=infra.sealos.io,resources=infras/status,verbs=get;update;patch

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -127,33 +126,29 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
return ctrl.Result{Requeue: true}, nil
}

// generate new clusterfile
var newClusterFile string
if err != nil && err.Error() == errClusterFileNotExists {
newCluster := generateClusterFromInfra(infra, cluster)
newClusterFile, err := convertClusterToYaml(newCluster)
newClusterFile, err = convertClusterToYaml(newCluster)
if err != nil {
r.recorder.Event(cluster, corev1.EventTypeWarning, "GenerateClusterfile", "generate clusterfile failed")
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 60}, err
}

if err := applyClusterfile(c, EIP, newClusterFile, getSealosVersion(cluster)); err != nil {
r.recorder.Event(cluster, corev1.EventTypeWarning, "ApplyClusterfile", "apply clusterfile failed")
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 60}, err
}
} else {
desiredCluster := generateClusterFromInfra(infra, cluster)
newCluster := mergeCluster(currentCluster, desiredCluster)
newClusterFile, err := convertClusterToYaml(newCluster)
r.Logger.Info("new clusterfile", "clusterfile: ", newClusterFile)

newClusterFile, err = convertClusterToYaml(newCluster)
if err != nil {
r.recorder.Event(cluster, corev1.EventTypeWarning, "GenerateClusterfile", err.Error())
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 60}, err
}
}

if err := applyClusterfile(c, EIP, newClusterFile, getSealosVersion(cluster)); err != nil {
r.recorder.Event(cluster, corev1.EventTypeWarning, "ApplyClusterfile", err.Error())
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 60}, err
}
// apply new clusterfile
if err := applyClusterfile(c, EIP, newClusterFile, getSealosVersion(cluster), getSealosArch(infra)); err != nil {
r.recorder.Event(cluster, corev1.EventTypeWarning, "ApplyClusterfile", err.Error())
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 60}, err
}

// update cluster status
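One structural fix in this hunk: `newClusterFile` is now declared once with `var` and assigned with `=` in both branches, so a single apply step at the end can see it. In the old code, `:=` inside each branch declared a new, block-scoped variable, which is why the apply call had to be duplicated. A generic illustration of that Go shadowing pitfall (not code from this repo):

```go
package main

import "fmt"

func render() (string, error) { return "rendered", nil }

func main() {
	var out string
	regenerate := true
	if regenerate {
		// Using := here would declare a NEW `out` scoped to this block,
		// leaving the outer `out` empty once the block ends.
		out, _ = render() // `=` assigns to the outer variable instead
	}
	fmt.Println(out) // rendered
}
```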
@@ -209,7 +204,6 @@ func mergeCluster(currentCluster *v1beta1.Cluster, desiredCluster *v1.Cluster) *
}

func convertClusterToYaml(cluster interface{}) (string, error) {
// convert cluster to yaml
newClusterfile, err := yaml.Marshal(cluster)
if err != nil {
return "", fmt.Errorf("marshal cluster to yaml failed: %v", err)
@@ -254,6 +248,16 @@ func getSealosVersion(cluster *v1.Cluster) string {
return defaultSealosVersion
}

// get sealos arch from infra
func getSealosArch(infra *infrav1.Infra) string {
for _, host := range infra.Spec.Hosts {
if host.Roles[0] == v1beta1.MASTER {
return host.Arch
}
}
return defaultSealosArch
}
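A rough sketch of the lookup above with a simplified stand-in type (`host` here is hypothetical; the real fields come from the infra API). Note that only the first entry of each host's role list is inspected, and the first matching master wins:

```go
package main

import "fmt"

// host is a simplified stand-in for infrav1.Hosts, for illustration only.
type host struct {
	Roles []string
	Arch  string
}

func sealosArch(hosts []host) string {
	for _, h := range hosts {
		// Mirrors getSealosArch: only Roles[0] is checked.
		if h.Roles[0] == "master" {
			return h.Arch
		}
	}
	return "amd64" // defaultSealosArch
}

func main() {
	hosts := []host{
		{Roles: []string{"node"}, Arch: "amd64"},
		{Roles: []string{"master"}, Arch: "arm64"},
	}
	fmt.Println(sealosArch(hosts)) // arm64
}
```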

func waitAllInstanceSSHReady(c ssh.Interface, ips []string) error {
for _, ip := range ips {
if err := utils.WaitSSHReady(c, 5, ip); err != nil {
@@ -274,27 +278,30 @@ func getSSHclient(infra *infrav1.Infra) ssh.Interface {
}

// Apply clusterfile on infra
func applyClusterfile(c ssh.Interface, EIP, clusterfile, sealosVersion string) error {
func applyClusterfile(c ssh.Interface, EIP, clusterfile, sealosVersion string, sealosArch string) error {
createClusterfile := fmt.Sprintf(`tee /root/Clusterfile <<EOF
%s
EOF`, clusterfile)
preDownloadSealos := fmt.Sprintf(preDownload, sealosVersion)
if err := c.CmdAsync(EIP, []string{preDownloadSealos}...); err != nil {
return fmt.Errorf("download sealos failed: %v", err)
}

// check sealos version
out, err := c.Cmd(EIP, sealosVersionCmd)
downloadSealos := fmt.Sprintf(downloadSealosCmd, sealosVersion, sealosArch)
if err == nil {
currentVersion, err2 := parseSealosVersion(out)
if err2 != nil {
return fmt.Errorf("get sealos version failed: %v", err2)
return fmt.Errorf("parse sealos version failed: %v", err2)
}
if *currentVersion != sealosVersion {
downloadSealos := fmt.Sprintf(downloadSealosCmd, sealosVersion)
if err := c.CmdAsync(EIP, []string{downloadSealos}...); err != nil {
return fmt.Errorf("download sealos failed: %v", err)
}
}
} else {
if err := c.CmdAsync(EIP, []string{downloadSealos}...); err != nil {
return fmt.Errorf("download sealos failed: %v", err)
}
}

cmds := []string{createClusterfile, applyClusterfileCmd}
if err := c.CmdAsync(EIP, cmds...); err != nil {
return fmt.Errorf("write clusterfile to remote failed: %v", err)
@@ -304,12 +311,16 @@ EOF`, clusterfile)
}
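The download logic above boils down to: probe the remote binary with `sealos version --short`, and (re)download only when the probe fails or the parsed version differs from the requested one. A condensed stand-alone sketch of that decision (not the repo's code):

```go
package main

import (
	"errors"
	"fmt"
)

// needDownload mirrors the branching in applyClusterfile: install sealos
// when the remote version probe fails, or when the installed version
// differs from the requested one.
func needDownload(probeErr error, current, want string) bool {
	if probeErr != nil {
		return true // binary missing or not runnable: download it
	}
	return current != want // wrong version installed: replace it
}

func main() {
	fmt.Println(needDownload(errors.New("not found"), "", "4.1.4")) // true
	fmt.Println(needDownload(nil, "4.1.4", "4.1.4"))                // false
	fmt.Println(needDownload(nil, "4.1.3", "4.1.4"))                // true
}
```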

func parseSealosVersion(out []byte) (*string, error) {
version := &version2.Info{}
err := json.Unmarshal(out, version)
if err != nil {
return nil, fmt.Errorf("parse sealos version failed: %v", err)
str := string(out[:])
strs := strings.Split(str, "-")
if len(strs) < 2 {
return nil, fmt.Errorf("sealos version is not correct: %v", str)
}
var version string
for i := 0; i < len(strs)-1; i++ {
version += strs[i]
}
return &version.GitVersion, nil
return &version, nil
}
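A worked example of the parser above, assuming `--short` prints a version plus a build suffix such as `4.1.4-7d1f5d6c` (that output shape is an assumption, not something shown in this diff). One caveat worth noting: the parts are concatenated without the separator, so a version containing more than one `-` comes back with its inner hyphens dropped:

```go
package main

import (
	"fmt"
	"strings"
)

// parseShortVersion is a copy of parseSealosVersion, reproduced for illustration.
func parseShortVersion(out []byte) (*string, error) {
	str := string(out)
	strs := strings.Split(str, "-")
	if len(strs) < 2 {
		return nil, fmt.Errorf("sealos version is not correct: %v", str)
	}
	var version string
	for i := 0; i < len(strs)-1; i++ {
		version += strs[i]
	}
	return &version, nil
}

func main() {
	v, _ := parseShortVersion([]byte("4.1.4-7d1f5d6c"))
	fmt.Println(*v) // 4.1.4

	v, _ = parseShortVersion([]byte("4.1.4-rc1-7d1f5d6c"))
	fmt.Println(*v) // 4.1.4rc1 (the inner hyphen is lost)
}
```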

func getMasterClusterfile(c ssh.Interface, EIP string) (*v1beta1.Cluster, error) {
2 changes: 2 additions & 0 deletions controllers/infra/common/common.go
@@ -23,6 +23,8 @@ const (
KeyPairUser = "sealos.io/aws/keypair/user"
KeyPairGeneral = "sealos.io/aws/keypair/general"
KeyPairRoot = "sealos.io/aws/keypair/root"
ArchAmd64 = "amd64"
ArchArm64 = "arm64"
)

var DefaultRootVolumeSize = int32(40)
112 changes: 9 additions & 103 deletions controllers/infra/drivers/aws/get_instance.go
@@ -272,9 +272,11 @@ func (d Driver) getInstances(infra *v1.Infra, status types.InstanceStateName) ([
continue
}
instanceType, imageID := i.InstanceType, i.ImageId
arch := getInstanceArch(i)

hostmap[index] = &v1.Hosts{
Count: 1,
Arch: arch,
Metadata: []v1.Metadata{metadata},
Image: *imageID,
Flavor: string(instanceType),
@@ -301,77 +303,13 @@
return hosts, nil
}

// createDisks assign disk(all mount paths) to host for the first time
//func setDisks(host *v1.Hosts, diskMap map[string]v1.Disk, instance types.Instance) {
// for j := range instance.BlockDeviceMappings {
// volumeID := *instance.BlockDeviceMappings[j].Ebs.VolumeId
// if v, ok := diskMap[volumeID]; ok {
// host.Disks = append(host.Disks,
// v1.Disk{
// // ID: []string{volumeID},
// Name: *instance.BlockDeviceMappings[j].DeviceName,
// Capacity: v.Capacity,
// Type: v.Type,
// },
// )
// }
// }
//}

// addDisks add the mount volumeID to each mount path
//func addDisks(host *v1.Hosts, diskMap map[string]v1.Disk, instance types.Instance) error {
// for _, blockDeviceMap := range instance.BlockDeviceMappings {
// if _, ok := diskMap[*blockDeviceMap.Ebs.VolumeId]; !ok {
// continue
// }
// //search disk index in host by name
// diskIndex := sort.Search(len(host.Disks), func(i int) bool {
// return host.Disks[i].Name >= *blockDeviceMap.DeviceName
// })
// if diskIndex < 0 || diskIndex >= len(host.Disks) {
// return fmt.Errorf("get aws disk error, disk not found. disk name: %s, instance id:%s", *blockDeviceMap.DeviceName, *instance.InstanceId)
// }
// // host.Disks[diskIndex].ID = append(host.Disks[diskIndex].ID, *blockDeviceMap.Ebs.VolumeId)
// }
// return nil
//}

// getVolumes get an infra owned volumes(by dataKey and infra label), return diskMap map[string(id)]v1.Disk.
//func (d Driver) getVolumes(infra *v1.Infra) (map[string]v1.Disk, error) {
// client := d.Client
// tagKey := fmt.Sprintf("tag:%s", common.DataVolumeLabel)
// //roleKey := fmt.Sprintf("tag:%s", roles)
// nameKey := fmt.Sprintf("tag:%s", common.InfraVolumesLabel)
// fullName := infra.GetInstancesAndVolumesTag()
// input := &ec2.DescribeVolumesInput{
// Filters: []types.Filter{
// {
// Name: &nameKey,
// Values: []string{fullName},
// },
// {
// Name: &tagKey,
// Values: []string{common.TRUELable},
// },
// },
// }
// result, err := client.DescribeVolumes(context.TODO(), input)
// if err != nil {
// return nil, fmt.Errorf("got an error retrieving information about your Amazon EC2 volumes: %v", err)
// }
// diskMap := make(map[string]v1.Disk, len(result.Volumes))
// for _, v := range result.Volumes {
// diskMap[*v.VolumeId] = v1.Disk{
// Capacity: int(*v.Size),
// Type: string(v.VolumeType),
// }
// }
// return diskMap, nil
//}

//func sortDisksByName(disks v1.NameDisks) {
// sort.Sort(disks)
//}
func getInstanceArch(i types.Instance) string {
arch := common.ArchAmd64
if i.Architecture == common.ArchArm64 {
arch = common.ArchArm64
}
return arch
}
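For context: EC2 reports `Architecture` as values such as `x86_64` or `arm64`, so the helper above collapses everything that is not arm64 to the amd64 default. A small stand-alone sketch of that mapping (`archValue` is a hypothetical stand-in for the SDK's `types.ArchitectureValues`):

```go
package main

import "fmt"

// archValue stands in for the AWS SDK's types.ArchitectureValues.
type archValue string

// normalizeArch mirrors getInstanceArch: anything other than "arm64"
// (e.g. "x86_64", "i386") falls back to "amd64".
func normalizeArch(a archValue) string {
	if a == "arm64" {
		return "arm64" // common.ArchArm64
	}
	return "amd64" // common.ArchAmd64
}

func main() {
	fmt.Println(normalizeArch("x86_64")) // amd64
	fmt.Println(normalizeArch("arm64"))  // arm64
}
```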

func getIndex(i types.Instance) (int, error) {
for _, tag := range i.Tags {
@@ -390,35 +328,3 @@ func getVolIndex(v types.Volume) (int, error) {
}
return -1, fmt.Errorf("volume index not found: %v", *v.VolumeId)
}

//func retryGetInstance(tryTimes int, trySleepTime time.Duration, client *ec2.Client, inputGetInstance *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
//retry:
// for i := 0; i < tryTimes; i++ {
// result, err := GetInstances(context.TODO(), client, inputGetInstance)
// if err != nil {
// return nil, fmt.Errorf("get an error while getInstances: %v", err)
// }
// for _, r := range result.Reservations {
// if !instancesPublicIPAddressIsReady(r.Instances) {
// if i == tryTimes-1 {
// break retry
// }
// time.Sleep(trySleepTime * time.Duration(math.Pow(2, float64(i))))
// continue retry
// }
// }
// return result, nil
// }
// return nil, fmt.Errorf("retry get instance action timeout: no public ip")
//}

//func instancesPublicIPAddressIsReady(instances []types.Instance) bool {
// for j := range instances {
// if *instances[j].State.Code == 0 || *instances[j].State.Code == 16 {
// if instances[j].PublicIpAddress == nil {
// return false
// }
// }
// }
// return true
//}
3 changes: 1 addition & 2 deletions controllers/infra/how to update infra controller_zh .md
@@ -5,5 +5,4 @@
| Support multiple Linux images | Build an image-to-root-user mapping table in the frontend and pass a different username per image; the infra module also needs an image-to-root-directory mapping table, and expands the root directory in create_instance. |
| Support more EC2 instance types (e.g. t2.2xlarge) and more image types | Only the frontend page code needs to be extended |
| Support adding and deleting cluster nodes | Install sealos on every master node; when needed, log in to any master node and run sealos add / sealos delete |
| Export the cluster module's command output to the frontend | Run it in a job, so that fmt output can be exported to the frontend |

| Export the cluster module's command output to the frontend | Run it in a job, so that fmt output can be exported to the frontend |
1 change: 1 addition & 0 deletions controllers/infra/testdata/api/cluster.go
@@ -20,6 +20,7 @@ spec:
infra: ${name}
image:
- labring/kubernetes:v1.25.5
- labring/helm:v3.8.2
- labring/calico:v3.24.1
`

4 changes: 2 additions & 2 deletions controllers/infra/testdata/api/infra.go
@@ -19,7 +19,7 @@ spec:
- roles: [master]
count: 1
flavor: t2.medium
image: "ami-09123565dfe16d662"
image: "ami-048280a00d5085dd1"
disks:
- capacity: 16
volumeType: gp3
@@ -29,7 +29,7 @@ spec:
- roles: [ node ]
count: 1
flavor: t2.medium
image: "ami-09123565dfe16d662"
image: "ami-048280a00d5085dd1"
disks:
- capacity: 16
volumeType: gp3
10 changes: 5 additions & 5 deletions docs/4.0/docs/cloud/apps/scp/scp.md
@@ -59,7 +59,7 @@ spec:
- roles: [master] # Required
count: 1 # Required
flavor: "t2.large"
image: "ami-0d66b970b9f16f1f5"
image: "ami-0d66b970b9f16f1f5"
- roles: [ node ] # Required
count: 1 # Required
flavor: "t2.medium"
@@ -129,10 +129,10 @@ spec:
hosts:
- roles: [ master ]
count: 1
flavor: t2.large # Virtual Machine Models
image: "ami-0d66b970b9f16f1f5" # Virtual Machine Image
flavor: t2.large # Virtual machine flavor
image: "ami-0d66b970b9f16f1f5" # Virtual machine image, need to be consistent with the virtual machine architecture
disks:
- capacity: 40 # System Disk Size
- capacity: 40 # System disk size
volumeType: gp3
type: "root" # System disk configuration
- capacity: 40
@@ -162,7 +162,7 @@ metadata:
sealos.io/version: "4.1.4" # Specify the version of sealos to use
spec:
infra: my-cluster # Specifying the name of the infra will start the kubernetes cluster on the corresponding infra
image: # List of cluster images, customize kubernetes version or other components according to your needs
image: # List of cluster images, customize kubernetes version or other components
- labring/kubernetes:v1.24.0
- labring/calico:v3.22.1
```