Commit a98f4b86 authored by 中弈's avatar 中弈
Browse files

init sealer

Showing with 1356 additions and 0 deletions
+1356 -0
Makefile 0 → 100644
# Build metadata. Each value falls back to a safe default so the Makefile
# still works outside a git checkout or without a version.txt file.
COMMIT_SHA1 ?= $(shell git rev-parse --short HEAD || echo "0.0.0")
BUILD_VERSION ?= $(shell cat version.txt || echo "local")
BUILD_TIME ?= $(shell date "+%F %T")
.PHONY: fmt vet lint default
# Colon-separated release tags of the installed Go toolchain, e.g.
# ":go1:go1.1:...:go1.12:", used below for version feature detection.
GO_RELEASE_TAGS := $(shell go list -f ':{{join (context.ReleaseTags) ":"}}:' runtime)
# Only use the `-race` flag on newer versions of Go (version 1.3 and newer)
ifeq (,$(findstring :go1.3:,$(GO_RELEASE_TAGS)))
RACE_FLAG :=
else
RACE_FLAG := -race -cpu 1,2,4
endif
# Run `go vet` on Go 1.12 and newer. For Go 1.5-1.11, use `go tool vet`
ifneq (,$(findstring :go1.12:,$(GO_RELEASE_TAGS)))
GO_VET := go vet \
	-atomic \
	-bool \
	-copylocks \
	-nilfunc \
	-printf \
	-rangeloops \
	-unreachable \
	-unsafeptr \
	-unusedresult \
	.
else ifneq (,$(findstring :go1.5:,$(GO_RELEASE_TAGS)))
GO_VET := go tool vet \
	-atomic \
	-bool \
	-copylocks \
	-nilfunc \
	-printf \
	-shadow \
	-rangeloops \
	-unreachable \
	-unsafeptr \
	-unusedresult \
	.
else
GO_VET := @echo "go vet skipped -- not supported on this version of Go"
endif
# fmt: exit non-zero and list offending files when any file is not gofmt-clean.
fmt: ## fmt
	@echo gofmt -l
	@OUTPUT=`gofmt -l . 2>&1`; \
	if [ "$$OUTPUT" ]; then \
		echo "gofmt must be run on the following files:"; \
		echo "$$OUTPUT"; \
		exit 1; \
	fi
lint:
	@golangci-lint run
.PHONY: lint
vet: ## vet
	$(GO_VET)
default: fmt lint vet
local: clean ## build the local binary
	@echo "build bin ${BUILD_VERSION} ${BUILD_TIME} ${COMMIT_SHA1}"
	@go build -mod vendor
clean: ## clean
	@echo "build bin ${BUILD_VERSION} ${BUILD_TIME} ${COMMIT_SHA1}"
## 集群镜像
集群镜像标准库,基于这些库可以很方便的构建集群镜像生态工具
## 研发规范
1. 从develop分支切分支,完成之后提交合并请求到develop分支
# apply集群镜像
apply模块是一个顶层的封装模块,负责让集群实例保持clusterfile中定义的终态。
```
Apply(Clusterfile)
image.Load()
fs.Mount()
runtime.Run()
Init()
Hook()
JoinMasters()
JoinNodes()
StaticPod()
guest.Apply()
```
package apply
import (
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
"gitlab.alibaba-inc.com/seadent/pkg/utils"
)
// Interface is the applier contract: Apply drives the live cluster to the
// state declared in the Clusterfile; Delete tears the cluster down.
type Interface interface {
	Apply() error
	Delete() error
}

// NewApplierFromFile unmarshals a Clusterfile from disk and returns the
// applier matching its provider.
// NOTE(review): on unmarshal failure this logs and returns a nil Interface;
// callers must nil-check before invoking Apply/Delete or they will panic.
func NewApplierFromFile(clusterfile string) Interface {
	cluster := &v1.Cluster{}
	if err := utils.UnmarshalYamlFile(clusterfile, cluster); err != nil {
		logger.Error("apply cloud cluster failed", err)
		return nil
	}
	return NewApplier(cluster)
}
// NewApplier picks an applier implementation based on the cluster's
// provider: ALI_CLOUD gets the cloud applier, everything else the default.
func NewApplier(cluster *v1.Cluster) Interface {
	if cluster.Spec.Provider == common.ALI_CLOUD {
		return NewAliCloudProvider(cluster)
	}
	return NewDefaultApplier(cluster)
}
package apply
import (
"fmt"
"github.com/pkg/errors"
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/filesystem"
"gitlab.alibaba-inc.com/seadent/pkg/guest"
"gitlab.alibaba-inc.com/seadent/pkg/image"
"gitlab.alibaba-inc.com/seadent/pkg/infra"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
"gitlab.alibaba-inc.com/seadent/pkg/runtime"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
"gitlab.alibaba-inc.com/seadent/pkg/utils"
"gitlab.alibaba-inc.com/seadent/pkg/utils/ssh"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ApplyCluster is the remote shell template: make the uploaded sealer binary
// executable, then run `<binary> apply -f <clusterfile>` on the master.
const ApplyCluster = "chmod +x %s && %s apply -f %s"

// CloudApplier provisions cloud infrastructure first, then reuses the
// embedded DefaultApplier flow on the remote master over SSH.
type CloudApplier struct {
	*DefaultApplier
}
// NewAliCloudProvider builds a CloudApplier wired with the default image,
// filesystem, runtime and guest managers for the given cluster.
func NewAliCloudProvider(cluster *v1.Cluster) Interface {
	base := &DefaultApplier{
		ClusterDesired: cluster,
		ImageManager:   image.NewImageService(),
		FileSystem:     filesystem.NewFilesystem(),
		Runtime:        runtime.NewDefaultRuntime(cluster),
		Guest:          guest.NewGuestManager(),
	}
	return &CloudApplier{DefaultApplier: base}
}
// Apply provisions cloud infrastructure for the desired cluster, then
// bootstraps the cluster remotely: it ships a BAREMETAL tmp Clusterfile to
// the master, runs `sealer apply` there, and fetches kubeconfig + kubectl so
// the local machine can reach the new cluster.
func (c *CloudApplier) Apply() error {
	cluster := c.ClusterDesired
	cloudProvider := infra.NewDefaultProvider(cluster)
	if cloudProvider == nil {
		return fmt.Errorf("new cloud provider failed")
	}
	if err := cloudProvider.Apply(cluster); err != nil {
		return fmt.Errorf("apply infra failed %v", err)
	}
	// A set DeletionTimestamp means this Apply was invoked from Delete: the
	// infra has just been torn down and there is nothing left to bootstrap.
	if cluster.DeletionTimestamp != nil {
		return nil
	}
	if err := c.SaveClusterfile(); err != nil {
		return err
	}
	// The remote run executes on the cloud VMs themselves, so switch the
	// provider to BAREMETAL before shipping the tmp Clusterfile over.
	cluster.Spec.Provider = common.BAREMETAL
	if err := utils.MarshalYamlToFile(common.TmpClusterfile, cluster); err != nil {
		return fmt.Errorf("marshal tmp cluster file failed %v", err)
	}
	client, err := ssh.NewSSHClientWithCluster(cluster)
	if err != nil {
		return fmt.Errorf("prepare cluster ssh client failed %v", err)
	}
	// BUGFIX: the errors of the next two steps were previously discarded.
	if err := runtime.PreInitMaster0(client.Ssh, client.Host); err != nil {
		return fmt.Errorf("pre init master0 failed %v", err)
	}
	if err := client.Ssh.CmdAsync(client.Host, fmt.Sprintf(ApplyCluster, common.RemoteSealerPath, common.RemoteSealerPath, common.TmpClusterfile)); err != nil {
		return fmt.Errorf("remote apply cluster failed %v", err)
	}
	// fetch the cluster kubeconfig, and add /etc/hosts "EIP apiserver.cluster.local" so we can get the current cluster status later
	if err := client.Ssh.Fetch(client.Host, common.DefaultKubeconfig, common.KubeAdminConf); err != nil {
		return err
	}
	if err := utils.AppendFile(common.EtcHosts, fmt.Sprintf("%s %s", client.Host, common.APIServerDomain)); err != nil {
		return errors.Wrap(err, "append EIP to etc hosts failed")
	}
	if err := client.Ssh.Fetch(client.Host, common.KubectlPath, common.KubectlPath); err != nil {
		return errors.Wrap(err, "fetch kubectl failed")
	}
	// BUGFIX: the previous message ("add EIP to etc hosts failed") described
	// the wrong step; this one is the chmod of the fetched kubectl.
	if err := utils.Cmd("chmod", "+x", common.KubectlPath); err != nil {
		return errors.Wrap(err, "chmod kubectl failed")
	}
	return nil
}
// Delete tears the cluster down by stamping a DeletionTimestamp on the
// desired cluster and re-running Apply (which then only destroys infra),
// followed by best-effort local cleanup of hosts entries and work files.
func (c *CloudApplier) Delete() error {
	now := metav1.Now()
	c.ClusterDesired.DeletionTimestamp = &now
	eip := c.ClusterDesired.GetClusterEIP()
	if err := c.Apply(); err != nil {
		return err
	}
	// Local cleanup is best-effort: warn and stop on the first failure.
	hostsEntry := fmt.Sprintf("%s %s", eip, common.APIServerDomain)
	if err := utils.RemoveFileContent(common.EtcHosts, hostsEntry); err != nil {
		logger.Warn(err)
		return nil
	}
	if err := utils.CleanFiles(common.DefaultKubeconfigDir, common.GetClusterWorkDir(c.ClusterDesired.Name), common.KubectlPath); err != nil {
		logger.Warn(err)
		return nil
	}
	return nil
}

// SaveClusterfile persists the desired cluster spec into the cluster's work
// directory, creating the directory tree first.
func (c *CloudApplier) SaveClusterfile() error {
	fileName := common.GetClusterWorkClusterfile(c.ClusterDesired.Name)
	if err := utils.MkFileFullPathDir(fileName); err != nil {
		return fmt.Errorf("mkdir failed %s %v", fileName, err)
	}
	if err := utils.MarshalYamlToFile(fileName, c.ClusterDesired); err != nil {
		return fmt.Errorf("marshal cluster file failed %v", err)
	}
	return nil
}
package apply
import "testing"
// TestAppendFile checks that appending a hosts-style line succeeds.
func TestAppendFile(t *testing.T) {
	cases := []struct {
		name     string
		fileName string
		content  string
		wantErr  bool
	}{
		{
			name:     "add hosts",
			fileName: "./test/hosts1",
			content:  "127.0.0.1 localhost",
			wantErr:  false,
		},
	}
	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			err := AppendFile(c.fileName, c.content)
			if gotErr := err != nil; gotErr != c.wantErr {
				t.Errorf("AppendFile() error = %v, wantErr %v", err, c.wantErr)
			}
		})
	}
}

// TestRemoveFileContent checks that removing a previously added line succeeds.
func TestRemoveFileContent(t *testing.T) {
	cases := []struct {
		name     string
		fileName string
		content  string
		wantErr  bool
	}{
		{
			name:     "delete hosts",
			fileName: "./test/hosts1",
			content:  "127.0.0.1 localhost",
			wantErr:  false,
		},
	}
	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			err := RemoveFileContent(c.fileName, c.content)
			if gotErr := err != nil; gotErr != c.wantErr {
				t.Errorf("RemoveFileContent() error = %v, wantErr %v", err, c.wantErr)
			}
		})
	}
}
package apply
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"gitlab.alibaba-inc.com/seadent/pkg/client"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
)
// MasterRoleLabel is the well-known kubernetes node label marking masters.
const MasterRoleLabel = "node-role.kubernetes.io/master"

// GetCurrentCluster returns the cluster currently reachable through the
// local kubeconfig, or (nil, nil) when none is reachable.
func GetCurrentCluster() (*v1.Cluster, error) {
	return getCurrentNodes()
}

// getCurrentNodes lists the reachable cluster's nodes and folds their
// addresses into a v1.Cluster skeleton, split into masters and workers by
// the master role label.
// NOTE: it deliberately returns (nil, nil) — not an error — when the client
// or node listing fails; callers treat that as "create a new cluster".
func getCurrentNodes() (*v1.Cluster, error) {
	c, err := client.NewClientSet()
	if err != nil {
		logger.Info("current cluster not found, will create a new cluster %v", err)
		return nil, nil
	}
	nodes, err := client.ListNodes(c)
	if err != nil {
		logger.Info("current cluster nodes not found, will create a new cluster")
		return nil, nil
	}
	cluster := &v1.Cluster{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{},
		Spec: v1.ClusterSpec{},
		Status: v1.ClusterStatus{},
	}
	for _, node := range nodes.Items {
		addr := getNodeAddress(&node)
		// Skip nodes that report no address at all.
		if addr == "" {
			continue
		}
		if _, ok := node.Labels[MasterRoleLabel]; ok {
			cluster.Spec.Masters.IPList = append(cluster.Spec.Masters.IPList, addr)
			continue
		}
		cluster.Spec.Nodes.IPList = append(cluster.Spec.Nodes.IPList, addr)
	}
	return cluster, nil
}

// getNodeAddress returns the node's first reported address, or "" when the
// node has none.
// NOTE(review): Addresses[0] may be a hostname rather than an IP depending
// on the provider — confirm callers tolerate non-IP values.
func getNodeAddress(node *corev1.Node) string {
	if len(node.Status.Addresses) < 1 {
		return ""
	}
	return node.Status.Addresses[0].Address
}
package apply
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
)
// TestGetCurrentCluster exercises GetCurrentCluster against whatever cluster
// the local kubeconfig can reach.
func TestGetCurrentCluster(t *testing.T) {
	tests := []struct {
		name    string
		want    *v1.Cluster
		wantErr bool
	}{
		{
			"test get cluster nodes",
			&v1.Cluster{
				TypeMeta:   metav1.TypeMeta{},
				ObjectMeta: metav1.ObjectMeta{},
				Spec: v1.ClusterSpec{
					Masters: v1.Hosts{
						IPList: []string{},
					},
					Nodes: v1.Hosts{
						IPList: []string{},
					},
				},
				Status: v1.ClusterStatus{},
			},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := GetCurrentCluster()
			if (err != nil) != tt.wantErr {
				t.Errorf("GetCurrentCluster() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// BUGFIX: GetCurrentCluster returns (nil, nil) when no cluster is
			// reachable; the original dereferenced got unconditionally and
			// panicked in that environment.
			if got == nil {
				t.Skip("no current cluster reachable, skipping")
			}
			logger.Info("masters : %v nodes : %v", got.Spec.Masters.IPList, got.Spec.Nodes.IPList)
		})
	}
}
package apply
import (
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"gitlab.alibaba-inc.com/seadent/pkg/filesystem"
"gitlab.alibaba-inc.com/seadent/pkg/guest"
"gitlab.alibaba-inc.com/seadent/pkg/image"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
"gitlab.alibaba-inc.com/seadent/pkg/runtime"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
"gitlab.alibaba-inc.com/seadent/pkg/utils"
)
// DefaultApplier reconciles a cluster towards ClusterDesired using the
// injected image/filesystem/runtime/guest managers.
// NOTE(review): the original comment here ("cloud builder using cloud
// provider to build a cluster image") was copied from the builder package
// and did not describe this type.
type DefaultApplier struct {
	ClusterDesired *v1.Cluster // target state from the Clusterfile
	ClusterCurrent *v1.Cluster // live state; nil when no cluster is reachable
	ImageManager image.Service
	FileSystem filesystem.Interface
	Runtime runtime.Interface
	Guest guest.Interface
	MastersToJoin []string // host deltas computed by diff()
	MastersToDelete []string
	NodesToJoin []string
	NodesToDelete []string
}

// ActionName identifies one reconciliation step executed via ActionFuncMap.
type ActionName string

// The vocabulary of reconciliation actions; diff() decides which of these
// run and in what order.
const (
	PullIfNotExist ActionName = "PullIfNotExist"
	Mount ActionName = "Mount"
	UnMount ActionName = "UnMount"
	Init ActionName = "Init"
	Upgrade ActionName = "Upgrade"
	ApplyMasters ActionName = "ApplyMasters"
	ApplyNodes ActionName = "ApplyNodes"
	Guest ActionName = "Guest"
	Reset ActionName = "Reset"
)
// ActionFuncMap maps each ActionName to its implementation. Actions read the
// applier's desired/current state and are executed in the order diff() emits.
var ActionFuncMap = map[ActionName]func(*DefaultApplier) error{
	PullIfNotExist: func(applier *DefaultApplier) error {
		// CONSISTENCY FIX: use the injected ImageManager instead of
		// constructing a fresh image service on every invocation.
		imageName := applier.ClusterDesired.Spec.Image
		return applier.ImageManager.PullIfNotExist(imageName)
	},
	Mount: func(applier *DefaultApplier) error {
		// TODO mount only mount desired hosts, some hosts already mounted when update cluster
		var hosts []string
		if applier.ClusterCurrent == nil {
			// Fresh install: mount the image on every desired master and node.
			hosts = append(applier.ClusterDesired.Spec.Masters.IPList, applier.ClusterDesired.Spec.Nodes.IPList...)
		} else {
			// Update: only hosts about to join need the image mounted.
			hosts = append(applier.MastersToJoin, applier.NodesToJoin...)
		}
		return applier.FileSystem.Mount(applier.ClusterDesired, hosts)
	},
	UnMount: func(applier *DefaultApplier) error {
		return applier.FileSystem.UnMount(applier.ClusterDesired)
	},
	Init: func(applier *DefaultApplier) error {
		return applier.Runtime.Init(applier.ClusterDesired)
	},
	Upgrade: func(applier *DefaultApplier) error {
		return applier.Runtime.Upgrade(applier.ClusterDesired)
	},
	ApplyMasters: func(applier *DefaultApplier) error {
		return applyMasters(applier)
	},
	ApplyNodes: func(applier *DefaultApplier) error {
		return applyNodes(applier)
	},
	Guest: func(applier *DefaultApplier) error {
		return applier.Guest.Apply(applier.ClusterDesired)
	},
	Reset: func(applier *DefaultApplier) error {
		return applier.Runtime.Reset(applier.ClusterDesired)
	},
}
// applyMasters joins the pending masters, then removes the retired ones.
func applyMasters(applier *DefaultApplier) error {
	if err := applier.Runtime.JoinMasters(applier.MastersToJoin); err != nil {
		return err
	}
	return applier.Runtime.DeleteMasters(applier.MastersToDelete)
}

// applyNodes joins the pending worker nodes, then removes the retired ones.
func applyNodes(applier *DefaultApplier) error {
	if err := applier.Runtime.JoinNodes(applier.NodesToJoin); err != nil {
		return err
	}
	return applier.Runtime.DeleteNodes(applier.NodesToDelete)
}
// Apply reconciles the live cluster towards ClusterDesired: it reads the
// currently reachable cluster (if any), computes the action list via diff()
// and executes each action in order, stopping on the first failure.
func (c *DefaultApplier) Apply() (err error) {
	currentCluster, err := GetCurrentCluster()
	if err != nil {
		return errors.Wrap(err, "get current cluster failed")
	}
	if currentCluster != nil {
		// Base the "current" view on a deep copy of the desired spec so only
		// the master/node host lists reflect the live cluster.
		c.ClusterCurrent = c.ClusterDesired.DeepCopy()
		c.ClusterCurrent.Spec.Masters = currentCluster.Spec.Masters
		c.ClusterCurrent.Spec.Nodes = currentCluster.Spec.Nodes
	}
	// diff never returns a non-nil error here, hence the discarded value.
	todoList, _ := c.diff()
	for _, action := range todoList {
		err := ActionFuncMap[action](c)
		if err != nil {
			return err
		}
	}
	return nil
}

// Delete marks the desired cluster for deletion and re-runs Apply, which
// then takes the teardown branch of diff().
func (c *DefaultApplier) Delete() (err error) {
	t := metav1.Now()
	c.ClusterDesired.DeletionTimestamp = &t
	return c.Apply()
}
// diff computes the ordered action list that moves the cluster from
// ClusterCurrent to ClusterDesired: teardown when a DeletionTimestamp is
// set, full install when no current cluster exists, otherwise an
// incremental update (upgrade and/or scale).
func (c *DefaultApplier) diff() (todoList []ActionName, err error) {
	if c.ClusterDesired.DeletionTimestamp != nil {
		// Teardown: drain nodes, then masters, reset the runtime, unmount.
		todoList = append(todoList, ApplyNodes, ApplyMasters, Reset, UnMount)
		return todoList, nil
	}
	if c.ClusterCurrent == nil {
		// Fresh install: pull, mount everywhere, init master0, join the rest.
		todoList = append(todoList, PullIfNotExist, Mount, Init)
		// BUGFIX: guard the slice — the original indexed IPList[1:] and
		// panicked when the desired masters list was empty.
		if len(c.ClusterDesired.Spec.Masters.IPList) > 0 {
			// master0 is handled by Init; the remaining masters join after.
			c.MastersToJoin = c.ClusterDesired.Spec.Masters.IPList[1:]
		}
		c.NodesToJoin = c.ClusterDesired.Spec.Nodes.IPList
		todoList = append(todoList, ApplyMasters, ApplyNodes, Guest)
		return todoList, nil
	}
	todoList = append(todoList, PullIfNotExist)
	if c.ClusterDesired.Spec.Image != c.ClusterCurrent.Spec.Image {
		// BUGFIX: "iamge" typo fixed in the log message.
		logger.Info("current image is : %s and desired image is : %s , so upgrade your cluster", c.ClusterCurrent.Spec.Image, c.ClusterDesired.Spec.Image)
		todoList = append(todoList, Upgrade)
	}
	c.MastersToJoin, c.MastersToDelete = utils.GetDiffHosts(c.ClusterCurrent.Spec.Masters, c.ClusterDesired.Spec.Masters)
	c.NodesToJoin, c.NodesToDelete = utils.GetDiffHosts(c.ClusterCurrent.Spec.Nodes, c.ClusterDesired.Spec.Nodes)
	todoList = append(todoList, Mount)
	if c.MastersToJoin != nil || c.MastersToDelete != nil {
		todoList = append(todoList, ApplyMasters)
	}
	if c.NodesToJoin != nil || c.NodesToDelete != nil {
		todoList = append(todoList, ApplyNodes)
	}
	// if only contains PullIfNotExist and Mount, we do nothing
	if len(todoList) == 2 {
		return nil, nil
	}
	todoList = append(todoList, Guest)
	return todoList, nil
}
// NewDefaultApplier wires a DefaultApplier with the standard image,
// filesystem, runtime and guest managers for the given cluster.
func NewDefaultApplier(cluster *v1.Cluster) Interface {
	applier := &DefaultApplier{ClusterDesired: cluster}
	applier.ImageManager = image.NewImageService()
	applier.FileSystem = filesystem.NewFilesystem()
	applier.Runtime = runtime.NewDefaultRuntime(cluster)
	applier.Guest = guest.NewGuestManager()
	return applier
}
package apply
import (
"fmt"
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/image"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
"net"
"sigs.k8s.io/yaml"
"strconv"
"strings"
)
// ClusterArgs couples a cluster spec with the raw master/node arguments
// supplied on the command line (either instance counts or comma-separated
// IP lists; see SetClusterArgs).
type ClusterArgs struct {
	cluster *v1.Cluster
	nodeArgs string // worker arg: count or "ip1,ip2,..."
	masterArgs string // master arg: count or "ip1,ip2,..."
}
// IsNumber reports whether args parses as a base-10 integer.
func IsNumber(args string) bool {
	if _, err := strconv.Atoi(args); err != nil {
		return false
	}
	return true
}

// IsIpList reports whether args is a comma-separated list in which every
// element is a valid IP address. Any empty element (including an empty
// input) makes it false.
func IsIpList(args string) bool {
	for _, candidate := range strings.Split(args, ",") {
		if net.ParseIP(candidate) == nil {
			return false
		}
	}
	return true
}
// SetClusterArgs fills the cluster spec from the raw master/node arguments.
// Both must be counts (cloud provisioning via ALI_CLOUD) or both must be
// comma-separated IP lists (BAREMETAL); any other combination is an error.
func (c *ClusterArgs) SetClusterArgs() error {
	if IsNumber(c.masterArgs) && IsNumber(c.nodeArgs) {
		c.cluster.Spec.Masters.Count = c.masterArgs
		c.cluster.Spec.Nodes.Count = c.nodeArgs
		c.cluster.Spec.Provider = common.ALI_CLOUD
		return nil
	}
	if IsIpList(c.masterArgs) && IsIpList(c.nodeArgs) {
		c.cluster.Spec.Masters.IPList = strings.Split(c.masterArgs, ",")
		c.cluster.Spec.Nodes.IPList = strings.Split(c.nodeArgs, ",")
		c.cluster.Spec.Provider = common.BAREMETAL
		return nil
	}
	// BUGFIX: the previous message ("enter true iplist or count") did not
	// explain what valid input looks like.
	return fmt.Errorf("invalid masters/nodes args: both must be counts or both comma-separated IP lists")
}
// GetClusterFileByImageName loads and decodes the Clusterfile stored in the
// manifest of imageName.
func GetClusterFileByImageName(imageName string) (cluster *v1.Cluster, err error) {
	raw := image.GetClusterFileFromImageManifest(imageName)
	if raw == "" {
		return nil, fmt.Errorf("failed to found Clusterfile")
	}
	if err = yaml.Unmarshal([]byte(raw), &cluster); err != nil {
		return nil, err
	}
	return cluster, nil
}

// NewApplierFromArgs builds an applier from an image's embedded Clusterfile,
// optionally overriding the master/node topology with the supplied args.
func NewApplierFromArgs(imageName string, masterArgs, nodeArgs string) (Interface, error) {
	cluster, err := GetClusterFileByImageName(imageName)
	if err != nil {
		return nil, err
	}
	if masterArgs == "" && nodeArgs == "" {
		// No overrides: apply the Clusterfile exactly as shipped.
		return NewApplier(cluster), nil
	}
	args := &ClusterArgs{
		cluster:    cluster,
		nodeArgs:   nodeArgs,
		masterArgs: masterArgs,
	}
	if err := args.SetClusterArgs(); err != nil {
		return nil, err
	}
	return NewApplier(args.cluster), nil
}
aaalocalhost
asdfasdfa
\ No newline at end of file
# 集群镜像的构建流程
1. kubefile的解析
2. build上下文构建打包等
3. infra申请云资源启动临时集群
4. 在临时集群中执行指令
5. 存储集群中的容器镜像等资源
6. 打包集群镜像
package build
import (
"fmt"
"gitlab.alibaba-inc.com/seadent/pkg/apply"
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
"gitlab.alibaba-inc.com/seadent/pkg/utils"
)
// applyCluster applies the tmp Clusterfile to bring up the temporary build
// cluster used for executing RUN/CMD build instructions.
func (l *LocalBuilder) applyCluster() error {
	if !utils.IsFileExist(common.TmpClusterfile) {
		return fmt.Errorf("%s not found", common.TmpClusterfile)
	}
	applier := apply.NewApplierFromFile(common.TmpClusterfile)
	// BUGFIX: NewApplierFromFile returns nil when the Clusterfile cannot be
	// parsed; guard to avoid a nil-interface panic on Apply.
	if applier == nil {
		return fmt.Errorf("failed to load applier from %s", common.TmpClusterfile)
	}
	if err := applier.Apply(); err != nil {
		return fmt.Errorf("failed to apply cluster:%v", err)
	}
	logger.Info("apply cluster success !")
	return nil
}
package build
import "gitlab.alibaba-inc.com/seadent/pkg/common"
// Interface abstracts a cluster-image builder (local or cloud).
type Interface interface {
	Build(name string, context string, kubefileName string) error
}

// NewBuilder returns the local builder for common.LocalBuild and the cloud
// builder for every other builder type.
func NewBuilder(config *Config, builderType string) Interface {
	switch builderType {
	case common.LocalBuild:
		return NewLocalBuilder(config)
	default:
		return NewCloudBuilder(config)
	}
}
package build
import (
"fmt"
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/image"
"gitlab.alibaba-inc.com/seadent/pkg/infra"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
"gitlab.alibaba-inc.com/seadent/pkg/utils"
"gitlab.alibaba-inc.com/seadent/pkg/utils/ssh"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"os"
"strings"
)
// cloud builder using cloud provider to build a cluster image

// CloudBuilder wraps a LocalBuilder and runs the actual build remotely on
// temporary cloud infrastructure over SSH.
type CloudBuilder struct {
	local *LocalBuilder
	RemoteHostIp string // address of the remote build host (set by InitBuildSSH)
	SSH ssh.Interface
}
// Build runs the cloud build pipeline for the given image name, build
// context directory and kubefile path.
func (c *CloudBuilder) Build(name string, context string, kubefileName string) error {
	if err := c.local.initBuilder(name, context, kubefileName); err != nil {
		return err
	}
	steps, err := c.GetBuildPipeLine()
	if err != nil {
		return err
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}

// GetBuildPipeLine chooses the build steps: COPY-only images need no running
// cluster and build locally; anything else provisions infra and builds on
// the remote host.
func (c *CloudBuilder) GetBuildPipeLine() ([]func() error, error) {
	if err := c.local.InitImageSpec(); err != nil {
		return nil, err
	}
	if c.local.IsOnlyCopy() {
		return []func() error{
			c.local.ExecBuild,
			c.local.UpdateImageMetadata,
			c.local.PushToRegistry,
		}, nil
	}
	return []func() error{
		c.InitClusterFile,
		c.ApplyInfra,
		c.InitBuildSSH,
		c.SendBuildContext,
		c.RemoteLocalBuild,
		c.Cleanup,
	}, nil
}
// load cluster file from disk

// InitClusterFile loads the Clusterfile into c.local.Cluster. It prefers an
// existing tmp Clusterfile on disk; otherwise getClusterFile materializes
// one (from the build context first, then the base image).
func (c *CloudBuilder) InitClusterFile() error {
	clusterFile := common.TmpClusterfile
	if !utils.IsFileExist(clusterFile) {
		err := c.getClusterFile()
		if err != nil {
			return err
		}
		clusterFile = common.RawClusterfile
	}
	var cluster v1.Cluster
	err := utils.UnmarshalYamlFile(clusterFile, &cluster)
	if err != nil {
		return fmt.Errorf("failed to read %s:%v", clusterFile, err)
	}
	c.local.Cluster = &cluster
	logger.Info("read cluster file %s success !", clusterFile)
	return nil
}

// getClusterFile materializes a Clusterfile at common.RawClusterfile, taken
// from the build context when present, else from the base image (layer 0).
func (c *CloudBuilder) getClusterFile() error {
	// find cluster file from context
	if c.getClusterFileFromContext() {
		logger.Info("get cluster file from context success!")
		return nil
	}
	// find cluster file from base image
	clusterFile := image.GetClusterFileFromImageName(c.local.Image.Spec.Layers[0].Value)
	if clusterFile == "" {
		return fmt.Errorf("failed to find cluster file")
	}
	err := utils.WriteFile(common.RawClusterfile, []byte(clusterFile))
	if err != nil {
		return fmt.Errorf("failed to write cluster file:%v", err)
	}
	return nil
}
// getClusterFileFromContext scans the image layers for a COPY of the default
// Clusterfile and, if found, copies it to common.RawClusterfile. It returns
// true only when the copy succeeded.
func (c *CloudBuilder) getClusterFileFromContext() bool {
	for i := range c.local.Image.Spec.Layers {
		layer := c.local.Image.Spec.Layers[i]
		if layer.Type != common.COPYCOMMAND {
			continue
		}
		fields := strings.Fields(layer.Value)
		// BUGFIX: skip malformed COPY layers instead of panicking on an
		// out-of-range index when layer.Value is empty.
		if len(fields) == 0 || fields[0] != common.DefaultClusterFileName {
			continue
		}
		if _, err := utils.CopySingleFile(fields[0], common.RawClusterfile); err != nil {
			return false
		}
		return true
	}
	return false
}
// apply infra create vms

// ApplyInfra provisions cloud VMs when the provider is ALI_CLOUD, then
// rewrites the cluster as BAREMETAL and persists it to the tmp Clusterfile
// so the remote build run targets the freshly created machines. For any
// other provider it is a no-op (beyond the success log).
func (c *CloudBuilder) ApplyInfra() error {
	if c.local.Cluster.Spec.Provider == common.ALI_CLOUD {
		infraManager := infra.NewDefaultProvider(c.local.Cluster)
		if err := infraManager.Apply(c.local.Cluster); err != nil {
			return fmt.Errorf("failed to apply infra :%v", err)
		}
		c.local.Cluster.Spec.Provider = common.BAREMETAL
		if err := utils.MarshalYamlToFile(common.TmpClusterfile, c.local.Cluster); err != nil {
			return fmt.Errorf("failed to write cluster info:%v", err)
		}
	}
	logger.Info("apply infra success !")
	return nil
}
// InitBuildSSH prepares the SSH client and records the remote build host.
func (c *CloudBuilder) InitBuildSSH() error {
	sshClient, err := ssh.NewSSHClientWithCluster(c.local.Cluster)
	if err != nil {
		return fmt.Errorf("failed to prepare cluster ssh client:%v", err)
	}
	c.SSH = sshClient.Ssh
	c.RemoteHostIp = sshClient.Host
	return nil
}

// SendBuildContext ships the local build context to the remote host.
func (c *CloudBuilder) SendBuildContext() error {
	return c.sendBuildContext()
}

// RemoteLocalBuild executes the BUILD/CMD instructions on the remote host.
func (c *CloudBuilder) RemoteLocalBuild() (err error) {
	return c.runBuildCommands()
}
// Cleanup tears down the temporary build infra and removes local temp
// files. All failures are logged and swallowed: cleanup is best-effort by
// design and always returns nil.
func (c *CloudBuilder) Cleanup() (err error) {
	t := metav1.Now()
	c.local.Cluster.DeletionTimestamp = &t
	c.local.Cluster.Spec.Provider = common.ALI_CLOUD
	infraManager := infra.NewDefaultProvider(c.local.Cluster)
	if err := infraManager.Apply(c.local.Cluster); err != nil {
		logger.Info("failed to cleanup infra :%v", err)
	}
	tarFileName := fmt.Sprintf(common.TmpTarFile, c.local.Image.Spec.ID)
	for _, f := range []string{tarFileName, common.TmpClusterfile, common.RawClusterfile} {
		// BUGFIX: a file that was never created is not a cleanup failure;
		// only log removal errors other than "does not exist".
		if err := os.Remove(f); err != nil && !os.IsNotExist(err) {
			logger.Info("failed to cleanup local temp file %s:%v", f, err)
		}
	}
	logger.Info("cleanup success !")
	return nil
}
// NewCloudBuilder wraps a fresh LocalBuilder (carrying the config) with the
// remote cloud build flow.
func NewCloudBuilder(cloudConfig *Config) Interface {
	return &CloudBuilder{
		local: &LocalBuilder{Config: cloudConfig},
	}
}
package build
import (
"fmt"
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
"gitlab.alibaba-inc.com/seadent/pkg/runtime"
"gitlab.alibaba-inc.com/seadent/pkg/utils"
)
//sendBuildContext: tar the local build context, ship it to the remote build
//host and unpack it into the cluster work dir. The remote host is prepared
//with PreInitMaster0 first unless the sealer binary is already present.
func (c *CloudBuilder) sendBuildContext() (err error) {
	// if remote cluster already exist,no need to pre init master0
	if !c.SSH.IsFileExist(c.RemoteHostIp, common.RemoteSeadentPath) {
		err = runtime.PreInitMaster0(c.SSH, c.RemoteHostIp)
		if err != nil {
			return fmt.Errorf("failed to prepare cluster env %v", err)
		}
	}
	// tar local build context
	tarFileName := fmt.Sprintf(common.TmpTarFile, c.local.Image.Spec.ID)
	if _, isExist := utils.CheckCmdIsExist("tar"); !isExist {
		// BUGFIX: typo "muster" -> "must" in the error message.
		return fmt.Errorf("local server must support tar cmd")
	}
	if _, err := utils.RunSimpleCmd(fmt.Sprintf(common.ZipCmd, tarFileName, c.local.Context)); err != nil {
		return fmt.Errorf("failed to create context file: %v", err)
	}
	// send to remote server
	workdir := fmt.Sprintf(common.DefaultWorkDir, c.local.Cluster.Name)
	if err := c.SSH.Copy(c.RemoteHostIp, tarFileName, tarFileName); err != nil {
		return err
	}
	// unzip remote context
	err = c.SSH.CmdAsync(c.RemoteHostIp, fmt.Sprintf(common.UnzipCmd, workdir, tarFileName, workdir))
	if err != nil {
		return err
	}
	logger.Info("send build context to %s success !", c.RemoteHostIp)
	return nil
}
package build
import (
"fmt"
"gitlab.alibaba-inc.com/seadent/pkg/command"
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/image"
"gitlab.alibaba-inc.com/seadent/pkg/image/reference"
imageUtils "gitlab.alibaba-inc.com/seadent/pkg/image/utils"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
"gitlab.alibaba-inc.com/seadent/pkg/parser"
v1 "gitlab.alibaba-inc.com/seadent/pkg/types/api/v1"
"gitlab.alibaba-inc.com/seadent/pkg/utils"
"gitlab.alibaba-inc.com/seadent/pkg/utils/hash"
"gitlab.alibaba-inc.com/seadent/pkg/utils/mount"
"io/ioutil"
"path/filepath"
"strings"
)
// Config carries builder configuration. It is currently empty but is the
// extension point handed into NewBuilder.
type Config struct {
}
// LocalBuilder: local builder using local provider to build a cluster image
type LocalBuilder struct {
	Config *Config
	Image *v1.Image // kubefile parsed into an image spec (set by InitImageSpec)
	Cluster *v1.Cluster // cluster used to execute RUN/CMD layers
	ImageName string // normalized (named) image reference
	ImageID string
	Context string // build context directory
	KubeFileName string // path to the kubefile
}
// Build executes the local build pipeline for the given image name, build
// context directory and kubefile path.
func (l *LocalBuilder) Build(name string, context string, kubefileName string) error {
	if err := l.initBuilder(name, context, kubefileName); err != nil {
		return err
	}
	steps, err := l.GetBuildPipeLine()
	if err != nil {
		return err
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}

// initBuilder normalizes the image name and records the build inputs.
func (l *LocalBuilder) initBuilder(name string, context string, kubefileName string) error {
	named, err := reference.ParseToNamed(name)
	if err != nil {
		return err
	}
	l.ImageName = named.Raw()
	l.Context = context
	l.KubeFileName = kubefileName
	return nil
}
// GetBuildPipeLine chooses the local build steps: COPY-only images skip
// base-image pulling and cluster provisioning entirely.
func (l *LocalBuilder) GetBuildPipeLine() ([]func() error, error) {
	if err := l.InitImageSpec(); err != nil {
		return nil, err
	}
	if l.IsOnlyCopy() {
		return []func() error{
			l.ExecBuild,
			l.UpdateImageMetadata,
			l.PushToRegistry,
		}, nil
	}
	return []func() error{
		l.PullBaseImageNotExist,
		l.ApplyCluster,
		l.ExecBuild,
		l.UpdateImageMetadata,
		l.PushToRegistry,
	}, nil
}
// init default Image metadata

// InitImageSpec parses the kubefile into l.Image, validates that the first
// instruction is FROM and assigns a fresh random image ID.
func (l *LocalBuilder) InitImageSpec() error {
	kubeFile, err := utils.ReadAll(l.KubeFileName)
	if err != nil {
		return fmt.Errorf("failed to load kubefile: %v", err)
	}
	l.Image = parser.NewParse().Parse(kubeFile, l.ImageName)
	if l.Image == nil {
		return fmt.Errorf("failed to parse kubefile, image is nil")
	}
	// NOTE(review): assumes a non-nil parse result always has at least one
	// layer — confirm the parser guarantees this.
	layer0 := l.Image.Spec.Layers[0]
	if layer0.Type != common.FROMCOMMAND {
		return fmt.Errorf("first line of kubefile must be FROM")
	}
	// Random 32-char ID; names temp artifacts and the metadata yaml file.
	l.Image.Spec.ID = utils.GenUniqueID(32)
	logger.Info("init image spec success! image id is %s", l.Image.Spec.ID)
	return nil
}
// IsOnlyCopy reports whether every instruction after FROM is free of
// RUN/CMD commands, i.e. the image can be built without a running cluster.
func (l *LocalBuilder) IsOnlyCopy() bool {
	layers := l.Image.Spec.Layers
	for idx := 1; idx < len(layers); idx++ {
		switch layers[idx].Type {
		case common.RUNCOMMAND, common.CMDCOMMAND:
			return false
		}
	}
	return true
}

// PullBaseImageNotExist pulls the base image (layer 0) unless building FROM
// scratch; already-present images are not re-pulled.
func (l *LocalBuilder) PullBaseImageNotExist() (err error) {
	baseImage := l.Image.Spec.Layers[0].Value
	if baseImage == common.ImageScratch {
		return nil
	}
	if err = image.NewImageService().PullIfNotExist(baseImage); err != nil {
		return fmt.Errorf("failed to pull baseImage: %v", err)
	}
	logger.Info("pull baseImage %s success", baseImage)
	return nil
}
// ExecBuild executes every instruction after FROM in order. COPY layers run
// directly against a temp dir; other layers run on an overlay mount of all
// layers produced so far. Each produced layer's hash is appended to the
// lower-layer stack so later instructions see its output.
func (l *LocalBuilder) ExecBuild() error {
	baseLayers, err := image.GetImageHashList(l.Image)
	if err != nil {
		return err
	}
	// Start at 1: layer 0 is the FROM instruction, resolved earlier.
	for i := 1; i < len(l.Image.Spec.Layers); i++ {
		layer := &l.Image.Spec.Layers[i]
		logger.Info("run build layer: %s %s", layer.Type, layer.Value)
		if layer.Type == common.COPYCOMMAND {
			err = l.execCopyLayer(layer)
			if err != nil {
				return err
			}
		} else {
			// exec other build cmd,need to mount
			err = l.execOtherLayer(layer, baseLayers)
			if err != nil {
				return err
			}
		}
		// The freshly written layer becomes a lower layer for the next step.
		baseLayers = append(baseLayers, filepath.Join(common.DefaultLayerDir, layer.Hash))
	}
	logger.Info("exec all build instructs success !")
	return nil
}
// run COPY command, because user can overwrite some file like Cluster file, or build a base image

// execCopyLayer runs a COPY instruction into a fresh temp dir, hashes the
// result and records the hash on the layer. The temp dir is always cleaned.
func (l *LocalBuilder) execCopyLayer(layer *v1.Layer) error {
	scratch, err := utils.MkTmpdir()
	if err != nil {
		return fmt.Errorf("failed to create %s:%v", scratch, err)
	}
	defer utils.CleanDir(scratch)
	if err := l.execLayer(layer, scratch); err != nil {
		return fmt.Errorf("failed to exec layer %v:%v", layer, err)
	}
	return l.countLayerHash(layer, scratch)
}
// execOtherLayer runs a non-COPY instruction on top of lowLayers via an
// overlay mount (target + upper temp dirs) and records the produced layer's
// hash from the upper dir. Both temp dirs are always cleaned.
func (l *LocalBuilder) execOtherLayer(layer *v1.Layer, lowLayers []string) error {
	target, err := utils.MkTmpdir()
	if err != nil {
		return fmt.Errorf("failed to create %s:%v", target, err)
	}
	upper, err := utils.MkTmpdir()
	if err != nil {
		return fmt.Errorf("failed to create %s:%v", upper, err)
	}
	defer utils.CleanDirs(target, upper)
	if err := l.mountAndExecLayer(layer, target, upper, lowLayers...); err != nil {
		return err
	}
	return l.countLayerHash(layer, upper)
}
// mountAndExecLayer mounts lowLayers (with tempUpper as the writable layer)
// at tempTarget, executes the layer instruction inside it and unmounts.
// The mount is released even when the instruction fails.
func (l *LocalBuilder) mountAndExecLayer(layer *v1.Layer, tempTarget, tempUpper string, lowLayers ...string) error {
	driver := mount.NewMountDriver()
	if err := driver.Mount(tempTarget, tempUpper, lowLayers...); err != nil {
		return fmt.Errorf("failed to mount target %s:%v", tempTarget, err)
	}
	execErr := l.execLayer(layer, tempTarget)
	// BUGFIX: always unmount, even when execLayer fails — the original
	// returned early and leaked the mount on a failed build step.
	umountErr := driver.Unmount(tempTarget)
	if execErr != nil {
		return fmt.Errorf("failed to exec layer %v:%v", layer, execErr)
	}
	if umountErr != nil {
		return fmt.Errorf("failed to umount %s:%v", tempTarget, umountErr)
	}
	return nil
}
// execLayer executes a single layer instruction inside tempTarget:
// COPY copies src into tempTarget/dst (appending the file name when src is
// a file), RUN/CMD execute the command with tempTarget as the working dir.
// Unknown layer types are ignored.
func (l *LocalBuilder) execLayer(layer *v1.Layer, tempTarget string) error {
	// exec layer cmd;
	if layer.Type == common.COPYCOMMAND {
		fields := strings.Fields(layer.Value)
		// BUGFIX: a COPY needs "src dst"; reject malformed values instead of
		// panicking on an out-of-range index. Also hoists the repeated
		// strings.Fields calls.
		if len(fields) < 2 {
			return fmt.Errorf("invalid COPY value %q, want `src dst`", layer.Value)
		}
		src, dst := fields[0], fields[1]
		dist := ""
		if utils.IsDir(src) {
			// src is dir
			dist = filepath.Join(tempTarget, dst)
		} else {
			// src is file
			dist = filepath.Join(tempTarget, dst, src)
		}
		return utils.RecursionCopy(src, dist)
	}
	if layer.Type == common.RUNCOMMAND || layer.Type == common.CMDCOMMAND {
		cmd := fmt.Sprintf(common.CdAndExecCmd, tempTarget, layer.Value)
		_, err := command.NewSimpleCommand(cmd).Exec()
		return err
	}
	return nil
}
// countLayerHash checksums the layer content under tempTarget via
// hash.CheckSumAndPlaceLayer and records the result on the layer.
func (l *LocalBuilder) countLayerHash(layer *v1.Layer, tempTarget string) error {
	sum, err := hash.CheckSumAndPlaceLayer(tempTarget)
	if err != nil {
		return fmt.Errorf("failed to count layer hash:%v", err)
	}
	layer.Hash = sum
	return nil
}

// ApplyCluster brings up the temporary build cluster (see applyCluster).
func (l *LocalBuilder) ApplyCluster() error {
	return l.applyCluster()
}
// UpdateImageMetadata writes the image spec to its per-ID metadata yaml and
// registers the name->ID mapping in the image metadata store. When a raw
// Clusterfile exists on disk, its content is embedded into the image
// annotations first.
func (l *LocalBuilder) UpdateImageMetadata() error {
	// write image info to its metadata
	filename := fmt.Sprintf("%s/%s%s", common.DefaultImageMetaRootDir, l.Image.Spec.ID, common.YamlSuffix)
	//set cluster file
	if utils.IsFileExist(common.RawClusterfile) {
		bytes, err := ioutil.ReadFile(common.RawClusterfile)
		if err != nil {
			return err
		}
		// Lazily initialize the annotations map before the first write.
		if l.Image.Annotations == nil {
			l.Image.Annotations = make(map[string]string)
		}
		l.Image.Annotations[common.ImageAnnotationForClusterfile] = string(bytes)
	}
	if err := utils.MarshalYamlToFile(filename, l.Image); err != nil {
		return fmt.Errorf("failed to write image yaml:%v", err)
	}
	logger.Info("write image yaml file to %s success !", filename)
	if err := imageUtils.SetImageMetadata(imageUtils.ImageMetadata{
		Name: l.ImageName,
		Id: l.Image.Spec.ID,
	}); err != nil {
		return fmt.Errorf("failed to set image metadata :%v", err)
	}
	logger.Info("update image %s to image metadata success !", l.ImageName)
	return nil
}
// PushToRegistry pushes the freshly built image to the registry.
func (l *LocalBuilder) PushToRegistry() error {
	//push image
	if err := image.NewImageService().Push(l.ImageName); err != nil {
		return fmt.Errorf("failed to push image :%v", err)
	}
	logger.Info("push image %s to registry success !", l.ImageName)
	return nil
}

// NewLocalBuilder returns a LocalBuilder bound to the given config.
func NewLocalBuilder(config *Config) Interface {
	return &LocalBuilder{Config: config}
}
package build
import (
"fmt"
"gitlab.alibaba-inc.com/seadent/pkg/common"
"gitlab.alibaba-inc.com/seadent/pkg/logger"
)
// runBuildCommands copies the raw Clusterfile to the remote host and runs
// the build command inside the remote cluster work dir.
func (c *CloudBuilder) runBuildCommands() error {
	// send raw cluster file
	if err := c.SSH.Copy(c.RemoteHostIp, common.RawClusterfile, common.RawClusterfile); err != nil {
		return err
	}
	workdir := fmt.Sprintf(common.DefaultWorkDir, c.local.Cluster.Name)
	buildCmd := fmt.Sprintf(common.BuildClusterCmd, common.ExecBinaryFileName,
		c.local.KubeFileName, c.local.ImageName, common.LocalBuild)
	logger.Info("run remote build %s", buildCmd)
	remoteCmd := fmt.Sprintf("cd %s && %s", workdir, buildCmd)
	if err := c.SSH.CmdAsync(c.RemoteHostIp, remoteCmd); err != nil {
		return fmt.Errorf("failed to run remote build:%v", err)
	}
	return nil
}
package testing
import (
"gitlab.alibaba-inc.com/seadent/pkg/build"
"gitlab.alibaba-inc.com/seadent/pkg/utils/ssh"
"os"
"testing"
)
/*func TestCloudBuilder_BuildOneByOne(t *testing.T) {
cb := new(build.CloudBuilder)
cb.KubefileName = "kubefile"
cb.ImageName = "myimage"
cb.Context = "."
err := cb.InitImageSpec()
if err != nil {
t.Errorf("init images error %v\n", err)
}
cluster := &v1.Cluster{
TypeMeta: metav1.TypeMeta{APIVersion: "", Kind: "Image"},
ObjectMeta: metav1.ObjectMeta{Name: "myCluster"},
Spec: v1.ClusterSpec{
Masters: v1.Hosts{
IPList: []string{"192.168.56.101", "192.168.56.102"},
},
},
}
cb.Cluster = cluster
cb.SSH = &ssh.SSH{
User: "root",
Password: "123456",
}
t.Run("test send context file", func(t *testing.T) {
if err := cb.SendBuildContext(); err != nil {
t.Errorf("send context failed,error is %v\n", err)
}
workdir := fmt.Sprintf(common.DefaultWorkDir, cb.Cluster.Name)
want := filepath.Join(workdir, "kubefile")
if !cb.SSH.IsFileExist(cb.Cluster.Annotations[common.RemoteServerEIPAnnotation], want) {
t.Errorf("test send context file failed: %s not found", want)
}
})
t.Run("test exec remote local build", func(t *testing.T) {
if err := cb.RemoteLocalBuild(); err != nil {
t.Errorf("remote local build failed,error is %v\n", err)
}
})
t.Run("test PullImage from remote", func(t *testing.T) {
if err := cb.SaveAndPullImage(); err != nil {
t.Errorf("pull image failed,error is %v\n", err)
}
imageFileName := fmt.Sprintf("%s.tar.gz", cb.Image.Spec.ID)
want := fmt.Sprintf("%s/%s", common.DefaultImageRootDir, imageFileName)
if IsNotExist(want) {
t.Errorf("test pull images file failed: %s not found", want)
}
})
}*/
func TestCloudBuilder_Build(t *testing.T) {
conf := &build.Config{
SSH: &ssh.SSH{
User: "a",
Password: "b",
},
}
/* cluster := &v1.Cluster{
TypeMeta: metav1.TypeMeta{APIVersion: "", Kind: "Image"},
ObjectMeta: metav1.ObjectMeta{Name: "myCluster"},
Spec: v1.ClusterSpec{
Masters: v1.Hosts{
IPList: []string{"192.168.56.101", "192.168.56.102"},
},
},
}*/
builder := build.NewBuilder(conf, "cloud")
err := builder.Build("myimage", ".", "kubefile", "88888888888")
if err != nil {
t.Errorf("exec build error %v\n", err)
}
}
// IsNotExist reports whether fileName is absent (Lstat: symlinks are not
// followed). Other stat errors also return false.
func IsNotExist(fileName string) bool {
	if _, err := os.Lstat(fileName); err != nil {
		return os.IsNotExist(err)
	}
	return false
}
i am dashboard yaml
\ No newline at end of file
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment