Unverified Commit c5b1b922 authored by Dennis Marttinen

Update import paths to vendored cluster-api-provider-existinginfra

Signed-off-by: Dennis Marttinen <dennis@weave.works>
parent 45102412
Showing with 172 additions and 198 deletions
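The change is almost entirely mechanical: packages that used to live inside wksctl now resolve to the vendored cluster-api-provider-existinginfra module, and call sites pick up a capei-prefixed alias where the short package name would otherwise collide. A minimal sketch of the mapping, using blank imports only so the fragment stands alone (old paths shown in comments; aliases as they appear in the hunks below):

```go
package main

import (
	// Old wksctl-local packages (dropped in this commit):
	//   github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/os
	//   github.com/weaveworks/wksctl/pkg/plan (and /recipe, /resource)
	//   github.com/weaveworks/wksctl/pkg/existinginfra/v1alpha3
	// New vendored equivalents, typically aliased capeios, capeiplan,
	// capeirecipe, capeiresource and capeiv1alpha3 at real call sites:
	_ "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
	_ "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/os"
	_ "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
	_ "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/recipe"
	_ "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
)

func main() {}
```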
......@@ -7,13 +7,14 @@ import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/config"
capeios "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/os"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/kubeadm"
"github.com/weaveworks/wksctl/pkg/addons"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config"
wksos "github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/os"
"github.com/weaveworks/wksctl/pkg/manifests"
"github.com/weaveworks/wksctl/pkg/plan/runners/ssh"
"github.com/weaveworks/wksctl/pkg/specs"
"github.com/weaveworks/wksctl/pkg/utilities/kubeadm"
"github.com/weaveworks/wksctl/pkg/utilities/manifest"
"github.com/weaveworks/wksctl/pkg/version"
)
......@@ -103,7 +104,7 @@ func (a *Applier) initiateCluster(clusterManifestPath, machinesManifestPath stri
return errors.Wrap(err, "failed to create SSH client")
}
defer sshClient.Close()
installer, err := wksos.Identify(sshClient)
installer, err := capeios.Identify(sshClient)
if err != nil {
return errors.Wrapf(err, "failed to identify operating system for seed node (%s)", sp.GetMasterPublicAddress())
}
......@@ -155,7 +156,7 @@ func (a *Applier) initiateCluster(clusterManifestPath, machinesManifestPath stri
}
}
if err := installer.SetupSeedNode(wksos.SeedNodeParams{
if err := wksos.SetupSeedNode(installer, wksos.SeedNodeParams{
PublicIP: sp.GetMasterPublicAddress(),
PrivateIP: sp.GetMasterPrivateAddress(),
ServicesCIDRBlocks: sp.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks,
......
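Note that the hunks above are more than a path swap: seed-node setup moves from a method on the installer to a package-level function that takes the installer as its first argument. Condensed from the diff above (field values elided; the final error wrap is illustrative, not from the source):

```go
installer, err := capeios.Identify(sshClient) // previously wksos.Identify(sshClient)
if err != nil {
	return errors.Wrapf(err, "failed to identify operating system for seed node (%s)", sp.GetMasterPublicAddress())
}
// previously: installer.SetupSeedNode(wksos.SeedNodeParams{...})
if err := wksos.SetupSeedNode(installer, wksos.SeedNodeParams{
	PublicIP:           sp.GetMasterPublicAddress(),
	PrivateIP:          sp.GetMasterPrivateAddress(),
	ServicesCIDRBlocks: sp.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks,
	// ...remaining fields unchanged from before the refactor...
}); err != nil {
	return errors.Wrap(err, "failed to set up the seed node")
}
```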
......@@ -6,6 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
capeipath "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/path"
"github.com/weaveworks/wksctl/pkg/kubernetes/config"
"github.com/weaveworks/wksctl/pkg/manifests"
"github.com/weaveworks/wksctl/pkg/specs"
......@@ -105,7 +106,7 @@ func writeKubeconfig(cpath, mpath string) error {
sp := specs.NewFromPaths(cpath, mpath)
if kubeconfigOptions.artifactDirectory != "" {
wksHome, err = path.CreateDirectory(path.ExpandHome(kubeconfigOptions.artifactDirectory))
wksHome, err = path.CreateDirectory(capeipath.ExpandHome(kubeconfigOptions.artifactDirectory))
if err != nil {
return errors.Wrapf(err, "failed to create WKS home directory")
}
......
......@@ -6,8 +6,9 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/os"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/config"
capeios "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/os"
wksos "github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/os"
"github.com/weaveworks/wksctl/pkg/manifests"
"github.com/weaveworks/wksctl/pkg/plan/runners/ssh"
"github.com/weaveworks/wksctl/pkg/specs"
......@@ -90,7 +91,7 @@ func displayPlan(clusterManifestPath, machinesManifestPath string) error {
return errors.Wrap(err, "failed to create SSH client: ")
}
defer sshClient.Close()
installer, err := os.Identify(sshClient)
installer, err := capeios.Identify(sshClient)
if err != nil {
return errors.Wrapf(err, "failed to identify operating system for seed node (%s)", sp.GetMasterPublicAddress())
}
......@@ -101,7 +102,7 @@ func displayPlan(clusterManifestPath, machinesManifestPath string) error {
configDir = filepath.Dir(clusterManifestPath)
}
params := os.SeedNodeParams{
params := wksos.SeedNodeParams{
PublicIP: sp.GetMasterPublicAddress(),
PrivateIP: sp.GetMasterPrivateAddress(),
ClusterManifestPath: clusterManifestPath,
......@@ -111,11 +112,11 @@ func displayPlan(clusterManifestPath, machinesManifestPath string) error {
NodeIP: sp.GetMasterPrivateAddress(),
CloudProvider: sp.GetCloudProvider(),
},
Controller: os.ControllerParams{
Controller: wksos.ControllerParams{
ImageOverride: viewOptions.controllerImage,
ImageBuiltin: "docker.io/weaveworks/wksctl-controller:" + version.ImageTag,
},
GitData: os.GitParams{
GitData: wksos.GitParams{
GitURL: viewOptions.gitURL,
GitBranch: viewOptions.gitBranch,
GitPath: viewOptions.gitPath,
......@@ -126,7 +127,7 @@ func displayPlan(clusterManifestPath, machinesManifestPath string) error {
AddonNamespaces: manifest.DefaultAddonNamespaces,
ConfigDirectory: configDir,
}
plan, err := installer.CreateSeedNodeSetupPlan(params)
plan, err := wksos.CreateSeedNodeSetupPlan(installer, params)
if err != nil {
return errors.Wrap(err, "could not generate plan")
}
......
......@@ -13,11 +13,11 @@ import (
"github.com/ghodss/yaml"
"github.com/google/go-jsonnet"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/manifest"
"github.com/weaveworks/libgitops/pkg/serializer"
"github.com/weaveworks/wksctl/pkg/addons/assets"
"github.com/weaveworks/wksctl/pkg/qjson"
"github.com/weaveworks/wksctl/pkg/registry"
"github.com/weaveworks/wksctl/pkg/utilities/manifest"
)
const (
......
package kubeadm
import (
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
......
......@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/config"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config/kubeadm"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
"sigs.k8s.io/yaml"
......
......@@ -16,22 +16,24 @@ import (
"github.com/bitnami-labs/sealed-secrets/pkg/crypto"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
capeiv1alpha3 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/config"
capeios "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/os"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
capeirecipe "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/recipe"
capeiresource "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/envcfg"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/manifest"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
"github.com/weaveworks/libgitops/pkg/serializer"
"github.com/weaveworks/wksctl/pkg/addons"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/controller/manifests"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/crds"
"github.com/weaveworks/wksctl/pkg/cluster/machine"
existinginfrav1 "github.com/weaveworks/wksctl/pkg/existinginfra/v1alpha3"
"github.com/weaveworks/wksctl/pkg/plan"
"github.com/weaveworks/wksctl/pkg/plan/recipe"
"github.com/weaveworks/wksctl/pkg/plan/resource"
"github.com/weaveworks/wksctl/pkg/plan/runners/sudo"
"github.com/weaveworks/wksctl/pkg/scheme"
"github.com/weaveworks/wksctl/pkg/specs"
"github.com/weaveworks/wksctl/pkg/utilities/envcfg"
"github.com/weaveworks/wksctl/pkg/utilities/manifest"
"github.com/weaveworks/wksctl/pkg/utilities/object"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
......@@ -166,7 +168,7 @@ func (params SeedNodeParams) GetAddonNamespace(name string) string {
// SetupSeedNode installs Kubernetes on this machine, and stores the provided
// manifests in the API server, so that the rest of the cluster can then be
// set up by the WKS controller.
func SetupSeedNode(o *capios.OS, params SeedNodeParams) error {
func SetupSeedNode(o *capeios.OS, params SeedNodeParams) error {
p, err := CreateSeedNodeSetupPlan(o, params)
if err != nil {
return err
......@@ -176,7 +178,7 @@ func SetupSeedNode(o *capios.OS, params SeedNodeParams) error {
// CreateSeedNodeSetupPlan constructs the seed node plan used to set up the initial node
// prior to turning control over to wks-controller
func CreateSeedNodeSetupPlan(o *capios.OS, params SeedNodeParams) (*plan.Plan, error) {
func CreateSeedNodeSetupPlan(o *capeios.OS, params SeedNodeParams) (*plan.Plan, error) {
if err := params.Validate(); err != nil {
return nil, err
}
......@@ -200,10 +202,10 @@ func CreateSeedNodeSetupPlan(o *capios.OS, params SeedNodeParams) (*plan.Plan, e
b := plan.NewBuilder()
baseRes := capirecipe.BuildBasePlan(o.PkgType)
baseRes := capeirecipe.BuildBasePlan(o.PkgType)
b.AddResource("install:base", baseRes)
configRes := capirecipe.BuildConfigPlan(configFileResources)
configRes := capeirecipe.BuildConfigPlan(configFileResources)
b.AddResource("install:config", configRes, plan.DependOn("install:base"))
pemSecretResources, authConfigMap, authConfigManifest, err := processPemFilesIfAny(b, &cluster.Spec, params.ConfigDirectory, params.Namespace, params.SealedSecretKeyPath, params.SealedSecretCertPath)
......@@ -211,10 +213,10 @@ func CreateSeedNodeSetupPlan(o *capios.OS, params SeedNodeParams) (*plan.Plan, e
return nil, err
}
criRes := capirecipe.BuildCRIPlan(&cluster.Spec.CRI, cfg, o.PkgType)
criRes := capeirecipe.BuildCRIPlan(&cluster.Spec.CRI, cfg, o.PkgType)
b.AddResource("install:cri", criRes, plan.DependOn("install:config"))
k8sRes := capirecipe.BuildK8SPlan(kubernetesVersion, params.KubeletConfig.NodeIP, cfg.SELinuxInstalled, cfg.SetSELinuxPermissive, cfg.DisableSwap, cfg.LockYUMPkgs, o.PkgType, params.KubeletConfig.CloudProvider, params.KubeletConfig.ExtraArguments)
k8sRes := capeirecipe.BuildK8SPlan(kubernetesVersion, params.KubeletConfig.NodeIP, cfg.SELinuxInstalled, cfg.SetSELinuxPermissive, cfg.DisableSwap, cfg.LockYUMPkgs, o.PkgType, params.KubeletConfig.CloudProvider, params.KubeletConfig.ExtraArguments)
b.AddResource("install:k8s", k8sRes, plan.DependOn("install:cri"))
apiServerArgs := getAPIServerArgs(&cluster.Spec, pemSecretResources)
......@@ -255,7 +257,7 @@ func CreateSeedNodeSetupPlan(o *capios.OS, params SeedNodeParams) (*plan.Plan, e
// TODO(damien): Add a CNI section in cluster.yaml once we support more than one CNI plugin.
const cni = "weave-net"
cniAdddon := existinginfrav1.Addon{Name: cni}
cniAdddon := capeiv1alpha3.Addon{Name: cni}
// we use the namespace defined in addon-namespace map to make weave-net run in kube-system
// as weave-net requires to run in the kube-system namespace *only*.
......@@ -298,7 +300,7 @@ func CreateSeedNodeSetupPlan(o *capios.OS, params SeedNodeParams) (*plan.Plan, e
if err != nil {
return nil, err
}
b.AddResource("node:plan", &resource.KubectlAnnotateSingleNode{Key: capirecipe.PlanKey, Value: seedNodePlan.ToJSON()}, plan.DependOn("kubeadm:init"))
b.AddResource("node:plan", &resource.KubectlAnnotateSingleNode{Key: capeirecipe.PlanKey, Value: seedNodePlan.ToJSON()}, plan.DependOn("kubeadm:init"))
addAuthConfigMapIfNecessary(configMapManifests, authConfigManifest)
......@@ -350,7 +352,7 @@ func CreateSeedNodeSetupPlan(o *capios.OS, params SeedNodeParams) (*plan.Plan, e
addonRsc := recipe.BuildAddonPlan(params.ClusterManifestPath, addons)
b.AddResource("install:addons", addonRsc, plan.DependOn("kubectl:apply:cluster", "kubectl:apply:machines"))
return capios.CreatePlan(b)
return capeios.CreatePlan(b)
}
// Sets the pod CIDR block in the weave-net manifest
......@@ -500,17 +502,17 @@ func storeIfNotEmpty(vals map[string]string, key, value string) {
}
}
func getAPIServerArgs(providerSpec *existinginfrav1.ClusterSpec, pemSecretResources map[string]*secretResourceSpec) map[string]string {
func getAPIServerArgs(providerSpec *capeiv1alpha3.ClusterSpec, pemSecretResources map[string]*secretResourceSpec) map[string]string {
result := map[string]string{}
authnResourceSpec := pemSecretResources["authentication"]
if authnResourceSpec != nil {
storeIfNotEmpty(result, "authentication-token-webhook-config-file", filepath.Join(capios.ConfigDestDir, authnResourceSpec.secretName+".yaml"))
storeIfNotEmpty(result, "authentication-token-webhook-config-file", filepath.Join(capeios.ConfigDestDir, authnResourceSpec.secretName+".yaml"))
storeIfNotEmpty(result, "authentication-token-webhook-cache-ttl", providerSpec.Authentication.CacheTTL)
}
authzResourceSpec := pemSecretResources["authorization"]
if authzResourceSpec != nil {
result["authorization-mode"] = "Webhook"
storeIfNotEmpty(result, "authorization-webhook-config-file", filepath.Join(capios.ConfigDestDir, authzResourceSpec.secretName+".yaml"))
storeIfNotEmpty(result, "authorization-webhook-config-file", filepath.Join(capeios.ConfigDestDir, authzResourceSpec.secretName+".yaml"))
storeIfNotEmpty(result, "authorization-webhook-cache-unauthorized-ttl", providerSpec.Authorization.CacheUnauthorizedTTL)
storeIfNotEmpty(result, "authorization-webhook-cache-authorized-ttl", providerSpec.Authorization.CacheAuthorizedTTL)
}
......@@ -537,12 +539,12 @@ func addClusterAPICRDs(b *plan.Builder) ([]string, error) {
return crdIDs, nil
}
func seedNodeSetupPlan(o *capios.OS, params SeedNodeParams, providerSpec *existinginfrav1.ClusterSpec, providerConfigMaps map[string]*v1.ConfigMap, authConfigMap *v1.ConfigMap, secretResources map[string]*secretResourceSpec, kubernetesVersion, kubernetesNamespace string) (*plan.Plan, error) {
secrets := map[string]capiresource.SecretData{}
func seedNodeSetupPlan(o *capeios.OS, params SeedNodeParams, providerSpec *capeiv1alpha3.ClusterSpec, providerConfigMaps map[string]*v1.ConfigMap, authConfigMap *v1.ConfigMap, secretResources map[string]*secretResourceSpec, kubernetesVersion, kubernetesNamespace string) (*plan.Plan, error) {
secrets := map[string]capeiresource.SecretData{}
for k, v := range secretResources {
secrets[k] = v.decrypted
}
nodeParams := capios.NodeParams{
nodeParams := capeios.NodeParams{
IsMaster: true,
MasterIP: params.PrivateIP,
MasterPort: 6443, // See TODO in machine_actuator.go
......@@ -560,7 +562,7 @@ func seedNodeSetupPlan(o *capios.OS, params SeedNodeParams, providerSpec *existi
return o.CreateNodeSetupPlan(nodeParams)
}
func applySeedNodePlan(o *capios.OS, p *plan.Plan) error {
func applySeedNodePlan(o *capeios.OS, p *plan.Plan) error {
err := p.Undo(o.Runner, plan.EmptyState)
if err != nil {
log.Infof("Pre-plan cleanup failed:\n%s\n", err)
......@@ -575,7 +577,7 @@ func applySeedNodePlan(o *capios.OS, p *plan.Plan) error {
return err
}
func createConfigFileResourcesFromFiles(providerSpec *existinginfrav1.ClusterSpec, configDir, namespace string) (map[string][]byte, map[string]*v1.ConfigMap, []*capiresource.File, error) {
func createConfigFileResourcesFromFiles(providerSpec *capeiv1alpha3.ClusterSpec, configDir, namespace string) (map[string][]byte, map[string]*v1.ConfigMap, []*capeiresource.File, error) {
fileSpecs := providerSpec.OS.Files
configMapManifests, err := getConfigMapManifests(fileSpecs, configDir, namespace)
if err != nil {
......@@ -589,14 +591,14 @@ func createConfigFileResourcesFromFiles(providerSpec *existinginfrav1.ClusterSpe
}
configMaps[name] = cmap
}
resources, err := capios.CreateConfigFileResourcesFromConfigMaps(fileSpecs, configMaps)
resources, err := capeios.CreateConfigFileResourcesFromConfigMaps(fileSpecs, configMaps)
if err != nil {
return nil, nil, nil, err
}
return configMapManifests, configMaps, resources, nil
}
func getConfigMapManifests(fileSpecs []existinginfrav1.FileSpec, configDir, namespace string) (map[string][]byte, error) {
func getConfigMapManifests(fileSpecs []capeiv1alpha3.FileSpec, configDir, namespace string) (map[string][]byte, error) {
configMapManifests := map[string][]byte{}
for _, fileSpec := range fileSpecs {
mapName := fileSpec.Source.ConfigMap
......@@ -640,7 +642,7 @@ func getConfigFileContents(fileNameComponent ...string) ([]byte, error) {
type secretResourceSpec struct {
secretName string
decrypted capiresource.SecretData
decrypted capeiresource.SecretData
resource plan.Resource
}
......@@ -648,7 +650,7 @@ type secretResourceSpec struct {
// directory, decrypts it using the GitHub deploy key, creates file
// resources for .pem files stored in the secret, and creates a SealedSecret resource
// for them that can be used by the machine actuator
func processPemFilesIfAny(builder *plan.Builder, providerSpec *existinginfrav1.ClusterSpec, configDir string, ns, privateKeyPath, certPath string) (map[string]*secretResourceSpec, *v1.ConfigMap, []byte, error) {
func processPemFilesIfAny(builder *plan.Builder, providerSpec *capeiv1alpha3.ClusterSpec, configDir string, ns, privateKeyPath, certPath string) (map[string]*secretResourceSpec, *v1.ConfigMap, []byte, error) {
if err := checkPemValues(providerSpec, privateKeyPath, certPath); err != nil {
return nil, nil, nil, err
}
......@@ -657,8 +659,8 @@ func processPemFilesIfAny(builder *plan.Builder, providerSpec *existinginfrav1.C
return nil, nil, nil, nil
}
b := plan.NewBuilder()
b.AddResource("create:pem-dir", &capiresource.Dir{Path: object.String(capios.PemDestDir)})
b.AddResource("set-perms:pem-dir", &capiresource.Run{Script: object.String(fmt.Sprintf("chmod 600 %s", capios.PemDestDir))}, plan.DependOn("create:pem-dir"))
b.AddResource("create:pem-dir", &capeiresource.Dir{Path: object.String(capeios.PemDestDir)})
b.AddResource("set-perms:pem-dir", &capeiresource.Run{Script: object.String(fmt.Sprintf("chmod 600 %s", capeios.PemDestDir))}, plan.DependOn("create:pem-dir"))
privateKey, err := getPrivateKey(privateKeyPath)
if err != nil {
return nil, nil, nil, err
......@@ -721,7 +723,7 @@ func getPrivateKey(privateKeyPath string) (*rsa.PrivateKey, error) {
return privateKey, nil
}
func checkPemValues(providerSpec *existinginfrav1.ClusterSpec, privateKeyPath, certPath string) error {
func checkPemValues(providerSpec *capeiv1alpha3.ClusterSpec, privateKeyPath, certPath string) error {
if privateKeyPath == "" || certPath == "" {
if providerSpec.Authentication != nil || providerSpec.Authorization != nil {
return errors.New("Encryption keys not specified; cannot process authentication and authorization specifications.")
......@@ -794,8 +796,8 @@ func processSecret(b *plan.Builder, key *rsa.PrivateKey, configDir, secretFileNa
return nil, nil, "", nil, fmt.Errorf("Missing auth config value for: %q in secret %q", key, secretName)
}
resName := secretName + "-" + key
fileName := filepath.Join(capios.PemDestDir, secretName, key+".pem")
b.AddResource("install:"+resName, &capiresource.File{Content: string(fileContents), Destination: fileName}, plan.DependOn("set-perms:pem-dir"))
fileName := filepath.Join(capeios.PemDestDir, secretName, key+".pem")
b.AddResource("install:"+resName, &capeiresource.File{Content: string(fileContents), Destination: fileName}, plan.DependOn("set-perms:pem-dir"))
decrypted[key] = fileContents
}
contextName := secretName + "-webhook"
......@@ -805,14 +807,14 @@ func processSecret(b *plan.Builder, key *rsa.PrivateKey, configDir, secretFileNa
APIVersion: "v1",
Clusters: map[string]*clientcmdapi.Cluster{
secretName: {
CertificateAuthority: filepath.Join(capios.PemDestDir, secretName, "certificate-authority.pem"),
CertificateAuthority: filepath.Join(capeios.PemDestDir, secretName, "certificate-authority.pem"),
Server: URL,
},
},
AuthInfos: map[string]*clientcmdapi.AuthInfo{
userName: {
ClientCertificate: filepath.Join(capios.PemDestDir, secretName, "client-certificate.pem"),
ClientKey: filepath.Join(capios.PemDestDir, secretName, "client-key.pem"),
ClientCertificate: filepath.Join(capeios.PemDestDir, secretName, "client-certificate.pem"),
ClientKey: filepath.Join(capeios.PemDestDir, secretName, "client-key.pem"),
},
},
CurrentContext: contextName,
......@@ -827,7 +829,7 @@ func processSecret(b *plan.Builder, key *rsa.PrivateKey, configDir, secretFileNa
if err != nil {
return nil, nil, "", nil, err
}
configResource := &capiresource.File{Content: string(authConfig), Destination: filepath.Join(capios.ConfigDestDir, secretName+".yaml")}
configResource := &capeiresource.File{Content: string(authConfig), Destination: filepath.Join(capeios.ConfigDestDir, secretName+".yaml")}
b.AddResource("install:"+secretName, configResource, plan.DependOn("set-perms:pem-dir"))
return contents, decrypted, secretName, authConfig, nil
......@@ -880,7 +882,7 @@ func configureFlux(b *plan.Builder, params SeedNodeParams) error {
if err != nil {
return errors.Wrap(err, "failed to process the git deploy key")
}
fluxAddon := existinginfrav1.Addon{Name: "flux", Params: gitParams}
fluxAddon := capeiv1alpha3.Addon{Name: "flux", Params: gitParams}
manifests, err := buildAddon(fluxAddon, params.ImageRepository, params.ClusterManifestPath, params.GetAddonNamespace("flux"))
if err != nil {
return errors.Wrap(err, "failed to generate manifests for flux")
......@@ -1048,7 +1050,7 @@ func updateControllerImage(manifest []byte, controllerImageOverride string) ([]b
}
// parseCluster converts the manifest file into a Cluster
func parseCluster(clusterManifestPath string) (eic *existinginfrav1.ExistingInfraCluster, err error) {
func parseCluster(clusterManifestPath string) (eic *capeiv1alpha3.ExistingInfraCluster, err error) {
f, err := os.Open(clusterManifestPath)
if err != nil {
return nil, err
......@@ -1082,7 +1084,7 @@ func parseAddons(ClusterManifestPath, namespace string, addonNamespaces map[stri
return ret, nil
}
func buildAddon(addonDefn existinginfrav1.Addon, imageRepository string, ClusterManifestPath, namespace string) ([][]byte, error) {
func buildAddon(addonDefn capeiv1alpha3.Addon, imageRepository string, ClusterManifestPath, namespace string) ([][]byte, error) {
log.WithField("addon", addonDefn.Name).Debug("building addon")
// Generate the addon manifest.
addon, err := addons.Get(addonDefn.Name)
......
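The seed-node plan in the file above is assembled with the builder from the vendored plan package: recipes (capeirecipe.Build*Plan) produce resources, and plan.DependOn wires the ordering. A reduced sketch of that pattern follows; the function name buildMinimalSeedPlan is hypothetical, and the parameter type is taken from createConfigFileResourcesFromFiles above — only calls visible in the hunks are used.

```go
import (
	capeios "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/os"
	"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
	capeirecipe "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/recipe"
	capeiresource "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
)

// buildMinimalSeedPlan shows the dependency-chained builder pattern used by
// CreateSeedNodeSetupPlan above (illustrative subset, not the full plan).
func buildMinimalSeedPlan(o *capeios.OS, configFileResources []*capeiresource.File) (*plan.Plan, error) {
	b := plan.NewBuilder()

	// Base packages first, then the config files that depend on them.
	baseRes := capeirecipe.BuildBasePlan(o.PkgType)
	b.AddResource("install:base", baseRes)

	configRes := capeirecipe.BuildConfigPlan(configFileResources)
	b.AddResource("install:config", configRes, plan.DependOn("install:base"))

	p, err := b.Plan()
	if err != nil {
		return nil, err
	}
	return &p, nil
}
```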
......@@ -8,10 +8,9 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/weaveworks/wksctl/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
"github.com/weaveworks/wksctl/pkg/plan/resource"
"github.com/weaveworks/wksctl/pkg/utilities/object"
"github.com/weaveworks/wksctl/test/plan/testutils"
appsv1 "k8s.io/api/apps/v1"
v1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
......
......@@ -9,8 +9,10 @@ import (
"github.com/blang/semver"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
capeiv1alpha3 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
capeimachine "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/cluster/machine"
capeikubernetes "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/kubernetes"
"github.com/weaveworks/libgitops/pkg/serializer"
existinginfrav1 "github.com/weaveworks/wksctl/pkg/existinginfra/v1alpha3"
"github.com/weaveworks/wksctl/pkg/kubernetes"
"github.com/weaveworks/wksctl/pkg/scheme"
"github.com/weaveworks/wksctl/pkg/utilities/manifest"
......@@ -42,7 +44,7 @@ func IsNode(machine *clusterv1.Machine) bool {
// FirstMaster scans the provided array of machines and returns the first
// one which is a "Master" or nil if none.
// Machines and ExistingInfraMachines must be in the same order
func FirstMaster(machines []*clusterv1.Machine, bl []*existinginfrav1.ExistingInfraMachine) (*clusterv1.Machine, *existinginfrav1.ExistingInfraMachine) {
func FirstMaster(machines []*clusterv1.Machine, bl []*capeiv1alpha3.ExistingInfraMachine) (*clusterv1.Machine, *capeiv1alpha3.ExistingInfraMachine) {
// TODO: validate size and ordering of lists
for i, machine := range machines {
if IsMaster(machine) {
......@@ -53,7 +55,7 @@ func FirstMaster(machines []*clusterv1.Machine, bl []*existinginfrav1.ExistingIn
}
// ParseManifest parses the provided machines manifest file.
func ParseManifest(file string) (ml []*clusterv1.Machine, bl []*existinginfrav1.ExistingInfraMachine, err error) {
func ParseManifest(file string) (ml []*clusterv1.Machine, bl []*capeiv1alpha3.ExistingInfraMachine, err error) {
f, err := os.Open(file)
if err != nil {
return nil, nil, err
......@@ -66,7 +68,7 @@ func ParseManifest(file string) (ml []*clusterv1.Machine, bl []*existinginfrav1.
}
// Parse parses the provided machines io.Reader.
func Parse(rc io.ReadCloser) (ml []*clusterv1.Machine, bl []*existinginfrav1.ExistingInfraMachine, err error) {
func Parse(rc io.ReadCloser) (ml []*clusterv1.Machine, bl []*capeiv1alpha3.ExistingInfraMachine, err error) {
// Read from the ReadCloser YAML document-by-document
fr := serializer.NewYAMLFrameReader(rc)
......@@ -81,7 +83,7 @@ func Parse(rc io.ReadCloser) (ml []*clusterv1.Machine, bl []*existinginfrav1.Exi
switch typed := obj.(type) {
case *clusterv1.Machine:
ml = append(ml, typed)
case *existinginfrav1.ExistingInfraMachine:
case *capeiv1alpha3.ExistingInfraMachine:
bl = append(bl, typed)
default:
return nil, nil, fmt.Errorf("unexpected type %T", obj)
......@@ -92,7 +94,7 @@ func Parse(rc io.ReadCloser) (ml []*clusterv1.Machine, bl []*existinginfrav1.Exi
}
// Validate validates the provided machines.
func Validate(machines []*clusterv1.Machine, bl []*existinginfrav1.ExistingInfraMachine) field.ErrorList {
func Validate(machines []*clusterv1.Machine, bl []*capeiv1alpha3.ExistingInfraMachine) field.ErrorList {
if len(machines) == 0 { // Some other validations crash on empty list
return field.ErrorList{nonFieldError("no machines")}
}
......@@ -240,7 +242,7 @@ func populateVersions(m *clusterv1.Machine) {
if m.Spec.Version != nil {
return
}
versionCopy := kubernetes.DefaultVersion
versionCopy := capeikubernetes.DefaultVersion
m.Spec.Version = &versionCopy
}
......@@ -271,7 +273,7 @@ func Populate(machines []*clusterv1.Machine) {
// InvalidMachinesHandler encapsulates logic to apply in case of an invalid
// machines manifest being provided.
type InvalidMachinesHandler = func(machines []*clusterv1.Machine, bl []*existinginfrav1.ExistingInfraMachine, errors field.ErrorList) ([]*clusterv1.Machine, []*existinginfrav1.ExistingInfraMachine, error)
type InvalidMachinesHandler = func(machines []*clusterv1.Machine, bl []*capeiv1alpha3.ExistingInfraMachine, errors field.ErrorList) ([]*clusterv1.Machine, []*capeiv1alpha3.ExistingInfraMachine, error)
// NoOpInvalidMachinesHandler does nothing when an invalid machines manifest
// is being provided.
......@@ -281,7 +283,7 @@ var NoOpInvalidMachinesHandler = func(machines []*clusterv1.Machine, errors fiel
// ParseAndDefaultAndValidate parses the provided manifest, validates it and
// defaults values where possible.
func ParseAndDefaultAndValidate(machinesManifestPath string, errorsHandler InvalidMachinesHandler) ([]*clusterv1.Machine, []*existinginfrav1.ExistingInfraMachine, error) {
func ParseAndDefaultAndValidate(machinesManifestPath string, errorsHandler InvalidMachinesHandler) ([]*clusterv1.Machine, []*capeiv1alpha3.ExistingInfraMachine, error) {
machines, bl, err := ParseManifest(machinesManifestPath)
if err != nil {
return nil, nil, err
......@@ -306,14 +308,14 @@ func GetKubernetesVersionFromManifest(machinesManifestPath string) (string, stri
// GetKubernetesVersionFromMasterIn reads the version of the Kubernetes control
// plane from the provided machines. If no version is configured, the default
// Kubernetes version will be returned.
func GetKubernetesVersionFromMasterIn(machines []*clusterv1.Machine, bl []*existinginfrav1.ExistingInfraMachine) (string, string, error) {
func GetKubernetesVersionFromMasterIn(machines []*clusterv1.Machine, bl []*capeiv1alpha3.ExistingInfraMachine) (string, string, error) {
// Ensures all machines have the same version (either specified or empty):
errs := Validate(machines, bl)
if len(errs) > 0 {
return "", "", errs.ToAggregate()
}
machine, _ := FirstMaster(machines, bl)
version := GetKubernetesVersion(machine)
version := capeimachine.GetKubernetesVersion(machine)
ns := machine.ObjectMeta.Namespace
if ns == "" {
ns = manifest.DefaultNamespace
......
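In pkg/cluster/machine the provider-specific machine type now comes from the vendored API group (capeiv1alpha3.ExistingInfraMachine), while the generic cluster-api Machine type is unchanged. A small usage sketch of the parsing helpers with the new return types; the wrapper function loadMachines and its error handling are illustrative only:

```go
import (
	capeiv1alpha3 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
	"github.com/weaveworks/wksctl/pkg/cluster/machine"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

// loadMachines parses a machines manifest and picks the first control-plane
// pair, using the vendored ExistingInfraMachine type for the provider side.
func loadMachines(path string) (*clusterv1.Machine, *capeiv1alpha3.ExistingInfraMachine, error) {
	machines, eims, err := machine.ParseManifest(path)
	if err != nil {
		return nil, nil, err
	}
	m, eim := machine.FirstMaster(machines, eims)
	return m, eim, nil
}
```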
......@@ -7,9 +7,10 @@ import (
"testing"
"github.com/stretchr/testify/assert"
capeiv1alpha3 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
capeimachine "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/cluster/machine"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/kubernetes"
"github.com/weaveworks/wksctl/pkg/cluster/machine"
existinginfrav1 "github.com/weaveworks/wksctl/pkg/existinginfra/v1alpha3"
"github.com/weaveworks/wksctl/pkg/kubernetes"
"github.com/weaveworks/wksctl/pkg/utilities/manifest"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
......@@ -43,7 +44,7 @@ func TestIsNode(t *testing.T) {
}
func TestFirstMasterInPointersArray(t *testing.T) {
bl := []*existinginfrav1.ExistingInfraMachine{nil, nil}
bl := []*capeiv1alpha3.ExistingInfraMachine{nil, nil}
v1, _ := machine.FirstMaster([]*clusterv1.Machine{
&worker,
&master,
......@@ -206,7 +207,7 @@ const machinesNoGodNoMaster = `
address: "172.17.8.102"
`
func machinesFromString(t *testing.T, s string) ([]*clusterv1.Machine, []*existinginfrav1.ExistingInfraMachine) {
func machinesFromString(t *testing.T, s string) ([]*clusterv1.Machine, []*capeiv1alpha3.ExistingInfraMachine) {
r := ioutil.NopCloser(strings.NewReader(s))
machines, bml, err := machine.Parse(r)
assert.NoError(t, err)
......@@ -320,6 +321,6 @@ func TestGetKubernetesVersionFromMasterInGetsControlPlaneVersion(t *testing.T) {
func TestGetKubernetesVersionDefaultsVersionWhenMachinesDoNotSpecifyAny(t *testing.T) {
machines, _ := machinesFromString(t, machinesWithoutVersions)
version := machine.GetKubernetesVersion(machines[0])
version := capeimachine.GetKubernetesVersion(machines[0])
assert.Equal(t, kubernetes.DefaultVersion, version)
}
......@@ -7,7 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/thanhpk/randstr"
existinginfrav1 "github.com/weaveworks/wksctl/pkg/existinginfra/v1alpha3"
capeiv1alpha3 "github.com/weaveworks/cluster-api-provider-existinginfra/apis/cluster.weave.works/v1alpha3"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/yaml"
)
......@@ -55,7 +55,7 @@ func UpdateWithGeneratedNames(r io.ReadCloser) (string, error) {
return buf.String(), err
}
func WriteMachines(w io.Writer, machines []*clusterv1.Machine, bml []*existinginfrav1.ExistingInfraMachine) error {
func WriteMachines(w io.Writer, machines []*clusterv1.Machine, bml []*capeiv1alpha3.ExistingInfraMachine) error {
// Need to do this in a loop because we want a stream not an array
for _, machine := range machines {
manifestBytes, err := yaml.Marshal(machine)
......
......@@ -8,8 +8,8 @@ import (
yaml "github.com/ghodss/yaml"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/runners/sudo"
"github.com/weaveworks/wksctl/pkg/plan/runners/ssh"
"github.com/weaveworks/wksctl/pkg/plan/runners/sudo"
"github.com/weaveworks/wksctl/pkg/specs"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
......
package kubernetes
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/kubernetes"
"github.com/weaveworks/wksctl/pkg/utilities/version"
)
func TestMatchesRangeDefaultVersion(t *testing.T) {
matches, err := version.MatchesRange(kubernetes.DefaultVersion, DefaultVersionsRange)
assert.NoError(t, err)
assert.True(t, matches)
}
package plan
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"github.com/fatih/structs"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/wksctl/pkg/utilities/object"
)
// Runner is something that can realise a step.
type Runner interface {
// RunCommand runs a command in a shell. This means cmd can be more than one
// single command, it can be a full bourne shell script.
RunCommand(cmd string, stdin io.Reader) (stdouterr string, err error)
}
// Resource is an atomic step of the plan.
type Resource interface {
// State returns the state that this step will realize when applied.
State() State
// QueryState returns the current state of this step. For instance, if the step
// describes the installation of a package, QueryState will return if the
// package is actually installed and its version.
QueryState(runner Runner) (State, error)
// Apply this step and indicate whether downstream resources should be re-applied
Apply(runner Runner, diff Diff) (propagate bool, err error)
// Undo this step.
Undo(runner Runner, current State) error
}
type RunError struct {
ExitCode int
}
func (e *RunError) Error() string {
return fmt.Sprintf("command exited with %d", e.ExitCode)
}
// Plan is a succession of Steps to produce a desired outcome.
type Plan struct {
id string
resources map[string]Resource
graph *graph
undoCondition func(Runner, State) bool
}
var (
dummyPlan Resource = RegisterResource(&Plan{})
planTypeName = extractResourceTypeName(dummyPlan)
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
)
// ParamString is a parameterizable string for passing output from one resource
......
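The plan package shown above defines the contract every step implements — State, QueryState, Apply, Undo — executed through a Runner. A minimal hand-rolled resource against that interface might look like the following sketch; the markerFile type and its touch/rm behaviour are invented purely for illustration, and real resources in this codebase additionally go through plan.RegisterResource, as the hunks below show.

```go
import (
	"fmt"

	"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
)

// markerFile is an illustrative plan.Resource that creates an empty marker
// file on the target host. State handling is kept trivial (EmptyState) purely
// to show the shape of the interface.
type markerFile struct {
	Path string
}

var _ plan.Resource = &markerFile{}

func (m *markerFile) State() plan.State { return plan.EmptyState }

func (m *markerFile) QueryState(r plan.Runner) (plan.State, error) {
	return plan.EmptyState, nil
}

func (m *markerFile) Apply(r plan.Runner, diff plan.Diff) (bool, error) {
	_, err := r.RunCommand(fmt.Sprintf("touch %q", m.Path), nil)
	return err == nil, err
}

func (m *markerFile) Undo(r plan.Runner, current plan.State) error {
	_, err := r.RunCommand(fmt.Sprintf("rm -f %q", m.Path), nil)
	return err
}
```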
......@@ -5,9 +5,9 @@ import (
"io/ioutil"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/controller/manifests"
existinginfrav1 "github.com/weaveworks/wksctl/pkg/existinginfra/v1alpha3"
"github.com/weaveworks/wksctl/pkg/plan"
"github.com/weaveworks/wksctl/pkg/plan/resource"
)
......
......@@ -5,7 +5,7 @@ import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/wksctl/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
)
func removeFile(remotePath string, runner plan.Runner) error {
......
......@@ -8,18 +8,21 @@ import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/config"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/scripts"
capeiplan "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
capeiresource "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
capeimanifest "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/manifest"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/ssh"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/version"
"github.com/weaveworks/libgitops/pkg/serializer"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/controller/manifests"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config/kubeadm"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/config/kubeproxy"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/scripts"
"github.com/weaveworks/wksctl/pkg/plan"
kubeadmutil "github.com/weaveworks/wksctl/pkg/utilities/kubeadm"
"github.com/weaveworks/wksctl/pkg/utilities/manifest"
"github.com/weaveworks/wksctl/pkg/utilities/object"
"github.com/weaveworks/wksctl/pkg/utilities/ssh"
"github.com/weaveworks/wksctl/pkg/utilities/version"
corev1 "k8s.io/api/core/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
"sigs.k8s.io/yaml"
......@@ -27,7 +30,7 @@ import (
// KubeadmInit represents an attempt to init a Kubernetes node via kubeadm.
type KubeadmInit struct {
base
capeiresource.Base
// PublicIP is the public IP of the master node we are trying to set up here.
PublicIP string `structs:"publicIP"`
......@@ -76,17 +79,17 @@ type KubeadmInit struct {
PodCIDRBlock string
}
var _ plan.Resource = plan.RegisterResource(&KubeadmInit{})
var _ capeiplan.Resource = capeiplan.RegisterResource(&KubeadmInit{})
// State implements plan.Resource.
func (ki *KubeadmInit) State() plan.State {
return toState(ki)
func (ki *KubeadmInit) State() capeiplan.State {
return capeiresource.ToState(ki)
}
// Apply implements plan.Resource.
// TODO: find a way to make this idempotent.
// TODO: should such a resource be split into smaller resources?
func (ki *KubeadmInit) Apply(runner plan.Runner, diff plan.Diff) (bool, error) {
func (ki *KubeadmInit) Apply(runner capeiplan.Runner, diff capeiplan.Diff) (bool, error) {
log.Debug("Initializing Kubernetes cluster")
sshKey, err := ssh.ReadPrivateKey(ki.SSHKeyPath)
......@@ -151,7 +154,7 @@ func (ki *KubeadmInit) Apply(runner plan.Runner, diff plan.Diff) (bool, error) {
ki.UseIPTables,
ki.KubernetesVersion,
&stdOutErr)
_, err = p.Apply(runner, plan.EmptyDiff())
_, err = p.Apply(runner, capeiplan.EmptyDiff())
if err != nil {
return false, errors.Wrap(err, "failed to initialize Kubernetes cluster with kubeadm")
}
......@@ -186,14 +189,14 @@ func (ki *KubeadmInit) updateManifestNamespace(fileName, namespace string) ([]by
if err != nil {
return nil, errors.Wrap(err, "Failed to open manifest")
}
c, err := manifest.WithNamespace(serializer.FromBytes(content), namespace)
c, err := capeimanifest.WithNamespace(serializer.FromBytes(content), namespace)
if err != nil {
return nil, err
}
return c, nil
}
func (ki *KubeadmInit) kubectlApply(fileName, namespace string, runner plan.Runner) error {
func (ki *KubeadmInit) kubectlApply(fileName, namespace string, runner capeiplan.Runner) error {
content, err := ki.updateManifestNamespace(fileName, namespace)
if err != nil {
return errors.Wrap(err, "Failed to upate manifest namespace")
......@@ -213,7 +216,7 @@ func (ki *KubeadmInit) manifestContent(fileName string) ([]byte, error) {
return content, nil
}
func (ki *KubeadmInit) applySecretWith(sshKey []byte, discoveryTokenCaCertHash, certKey, namespace string, runner plan.Runner) error {
func (ki *KubeadmInit) applySecretWith(sshKey []byte, discoveryTokenCaCertHash, certKey, namespace string, runner capeiplan.Runner) error {
log.Info("adding SSH key to WKS secret and applying its manifest")
fileName := "03_secrets.yaml"
secret, err := ki.deserializeSecret(fileName, namespace)
......@@ -247,19 +250,19 @@ func (ki *KubeadmInit) deserializeSecret(fileName, namespace string) (*corev1.Se
}
// Undo implements plan.Resource.
func (ki *KubeadmInit) Undo(runner plan.Runner, current plan.State) error {
func (ki *KubeadmInit) Undo(runner capeiplan.Runner, current capeiplan.State) error {
remotePath := "/tmp/wks_kubeadm_init_config.yaml"
var ignored string
return buildKubeadmInitPlan(
remotePath,
strings.Join(ki.IgnorePreflightErrors, ","),
ki.UseIPTables, ki.KubernetesVersion, &ignored).Undo(
runner, plan.EmptyState)
runner, capeiplan.EmptyState)
}
// buildKubeadmInitPlan builds a plan for kubeadm init command.
// Parameter k8sversion specified here represents the version of both Kubernetes and Kubeadm.
func buildKubeadmInitPlan(path string, ignorePreflightErrors string, useIPTables bool, k8sVersion string, output *string) plan.Resource {
func buildKubeadmInitPlan(path string, ignorePreflightErrors string, useIPTables bool, k8sVersion string, output *string) capeiplan.Resource {
// Detect version for --upload-cert-flags
uploadCertsFlag := "--upload-certs"
if lt, err := version.LessThan(k8sVersion, "v1.15.0"); err == nil && lt {
......@@ -276,70 +279,70 @@ func buildKubeadmInitPlan(path string, ignorePreflightErrors string, useIPTables
// We add resources to the plan graph for both "if" and "else" paths to make all resources deterministically connected.
// The graph resources will be easier to reason about when we execute them in parallel in the future.
//
b := plan.NewBuilder()
b := capeiplan.NewBuilder()
if useIPTables {
b.AddResource(
"configure:iptables",
&Run{Script: object.String("sysctl net.bridge.bridge-nf-call-iptables=1")}) // TODO: undo?
"configrure:iptables",
&capeiresource.Run{Script: object.String("sysctl net.bridge.bridge-nf-call-iptables=1")}) // TODO: undo?
} else {
b.AddResource(
"configure:iptables",
&Run{Script: object.String("echo no operation")})
&capeiresource.Run{Script: object.String("echo no operation")})
}
if upgradeKubeadmConfig {
b.AddResource(
"kubeadm:config:upgrade",
&Run{Script: plan.ParamString(
withoutProxy("kubeadm config migrate --old-config %s --new-config %s_upgraded && mv %s_upgraded %s"), &path, &path, &path, &path),
&capeiresource.Run{Script: plan.ParamString(
capeiresource.WithoutProxy("kubeadm config migrate --old-config %s --new-config %s_upgraded && mv %s_upgraded %s"), &path, &path, &path, &path),
},
plan.DependOn("configure:iptables"),
capeiplan.DependOn("configure:iptables"),
)
} else {
b.AddResource(
"kubeadm:config:upgrade",
&Run{Script: object.String("echo no upgrade is required")},
plan.DependOn("configure:iptables"),
&capeiresource.Run{Script: object.String("echo no upgrade is required")},
capeiplan.DependOn("configure:iptables"),
)
}
b.AddResource(
"kubeadm:reset",
&Run{Script: object.String("kubeadm reset --force")},
plan.DependOn("kubeadm:config:upgrade"),
&capeiresource.Run{Script: object.String("kubeadm reset --force")},
capeiplan.DependOn("kubeadm:config:upgrade"),
).AddResource(
"kubeadm:config:images",
&Run{Script: plan.ParamString("kubeadm config images pull --config=%s", &path)},
plan.DependOn("kubeadm:reset"),
&capeiresource.Run{Script: plan.ParamString("kubeadm config images pull --config=%s", &path)},
capeiplan.DependOn("kubeadm:reset"),
).AddResource(
"kubeadm:run-init",
// N.B.: --experimental-upload-certs encrypts & uploads
// certificates of the primary control plane in the kubeadm-certs
// Secret, and prints the value for --certificate-key to STDOUT.
&Run{Script: plan.ParamString("kubeadm init --config=%s --ignore-preflight-errors=%s %s", &path, &ignorePreflightErrors, &uploadCertsFlag),
&capeiresource.Run{Script: plan.ParamString("kubeadm init --config=%s --ignore-preflight-errors=%s %s", &path, &ignorePreflightErrors, &uploadCertsFlag),
UndoResource: buildKubeadmRunInitUndoPlan(),
Output: output,
},
plan.DependOn("kubeadm:config:images"),
capeiplan.DependOn("kubeadm:config:images"),
)
var homedir string
b.AddResource(
"kubeadm:get-homedir",
&Run{Script: object.String("echo -n $HOME"), Output: &homedir},
&capeiresource.Run{Script: object.String("echo -n $HOME"), Output: &homedir},
).AddResource(
"kubeadm:config:kubectl-dir",
&Dir{Path: plan.ParamString("%s/.kube", &homedir)},
plan.DependOn("kubeadm:get-homedir"),
&capeiresource.Dir{Path: plan.ParamString("%s/.kube", &homedir)},
capeiplan.DependOn("kubeadm:get-homedir"),
).AddResource(
"kubeadm:config:copy",
&Run{Script: plan.ParamString("cp /etc/kubernetes/admin.conf %s/.kube/config", &homedir)},
plan.DependOn("kubeadm:run-init", "kubeadm:config:kubectl-dir"),
&capeiresource.Run{Script: plan.ParamString("cp /etc/kubernetes/admin.conf %s/.kube/config", &homedir)},
capeiplan.DependOn("kubeadm:run-init", "kubeadm:config:kubectl-dir"),
).AddResource(
"kubeadm:config:set-ownership",
&Run{Script: plan.ParamString("chown -R $(id -u):$(id -g) %s/.kube", &homedir)},
plan.DependOn("kubeadm:config:copy"),
&capeiresource.Run{Script: plan.ParamString("chown -R $(id -u):$(id -g) %s/.kube", &homedir)},
capeiplan.DependOn("kubeadm:config:copy"),
)
p, err := b.Plan()
......@@ -349,23 +352,23 @@ func buildKubeadmInitPlan(path string, ignorePreflightErrors string, useIPTables
return &p
}
func buildKubeadmRunInitUndoPlan() plan.Resource {
b := plan.NewBuilder()
func buildKubeadmRunInitUndoPlan() capeiplan.Resource {
b := capeiplan.NewBuilder()
b.AddResource(
"file:kube-apiserver.yaml",
&File{Destination: "/etc/kubernetes/manifests/kube-apiserver.yaml"},
&capeiresource.File{Destination: "/etc/kubernetes/manifests/kube-apiserver.yaml"},
).AddResource(
"file:kube-controller-manager.yaml",
&File{Destination: "/etc/kubernetes/manifests/kube-controller-manager.yaml"},
&capeiresource.File{Destination: "/etc/kubernetes/manifests/kube-controller-manager.yaml"},
).AddResource(
"file:kube-scheduler.yaml",
&File{Destination: "/etc/kubernetes/manifests/kube-scheduler.yaml"},
&capeiresource.File{Destination: "/etc/kubernetes/manifests/kube-scheduler.yaml"},
).AddResource(
"file:etcd.yaml",
&File{Destination: "/etc/kubernetes/manifests/etcd.yaml"},
&capeiresource.File{Destination: "/etc/kubernetes/manifests/etcd.yaml"},
).AddResource(
"dir:etcd",
&Dir{Path: object.String("/var/lib/etcd"), RecursiveDelete: true},
&capeiresource.Dir{Path: object.String("/var/lib/etcd"), RecursiveDelete: true},
)
p, err := b.Plan()
if err != nil {
......
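The comment in buildKubeadmInitPlan above explains why both branches of each conditional register a resource under the same name: the graph stays deterministically connected whichever path is taken, so later steps can always DependOn the same id. A stripped-down sketch of that pattern with the vendored builder; the helper name addIPTablesStep is hypothetical, while the resource ids and scripts mirror the hunk above.

```go
import (
	capeiplan "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
	capeiresource "github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
	"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/object"
)

// addIPTablesStep always registers "configure:iptables" — either real work or
// a no-op — so the next step can unconditionally depend on it.
func addIPTablesStep(b *capeiplan.Builder, useIPTables bool) {
	if useIPTables {
		b.AddResource(
			"configure:iptables",
			&capeiresource.Run{Script: object.String("sysctl net.bridge.bridge-nf-call-iptables=1")})
	} else {
		b.AddResource(
			"configure:iptables",
			&capeiresource.Run{Script: object.String("echo no operation")})
	}
	b.AddResource(
		"kubeadm:reset",
		&capeiresource.Run{Script: object.String("kubeadm reset --force")},
		capeiplan.DependOn("configure:iptables"),
	)
}
```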
......@@ -5,12 +5,13 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/weaveworks/wksctl/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
)
// KubectlAnnotateSingleNode is a resource to apply an annotation to the only node in a cluster
type KubectlAnnotateSingleNode struct {
base
resource.Base
Key string // Which annotation to apply
Value string // Value of annotation
......@@ -20,12 +21,12 @@ var _ plan.Resource = plan.RegisterResource(&KubectlAnnotateSingleNode{})
// State implements plan.Resource.
func (ka *KubectlAnnotateSingleNode) State() plan.State {
return toState(ka)
return resource.ToState(ka)
}
// Apply fetches the node name and performs a "kubectl annotate".
func (ka *KubectlAnnotateSingleNode) Apply(runner plan.Runner, diff plan.Diff) (bool, error) {
output, err := runner.RunCommand(withoutProxy("kubectl get nodes -o name"), nil)
output, err := runner.RunCommand(resource.WithoutProxy("kubectl get nodes -o name"), nil)
if err != nil {
return false, errors.Wrapf(err, "could not fetch node name to annotate")
}
......@@ -37,7 +38,7 @@ func (ka *KubectlAnnotateSingleNode) Apply(runner plan.Runner, diff plan.Diff) (
cmd := fmt.Sprintf("kubectl annotate %q %s=%q", nodeName, ka.Key, ka.Value)
if stdouterr, err := runner.RunCommand(withoutProxy(cmd), nil); err != nil {
if stdouterr, err := runner.RunCommand(resource.WithoutProxy(cmd), nil); err != nil {
return false, errors.Wrapf(err, "failed to apply annotation %s on %s; output %s", ka.Key, nodeName, stdouterr)
}
......
......@@ -8,16 +8,17 @@ import (
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/apis/wksprovider/machine/scripts"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/utilities/manifest"
"github.com/weaveworks/libgitops/pkg/serializer"
"github.com/weaveworks/wksctl/pkg/apis/wksprovider/machine/scripts"
"github.com/weaveworks/wksctl/pkg/plan"
"github.com/weaveworks/wksctl/pkg/utilities/manifest"
)
// KubectlApply is a resource applying the provided manifest.
// It doesn't realise any state, Apply will always apply the manifest.
type KubectlApply struct {
base
resource.Base
// Filename is the remote manifest file name.
// Only provide this if you do NOT provide ManifestPath or ManifestURL.
......@@ -57,7 +58,7 @@ var _ plan.Resource = plan.RegisterResource(&KubectlApply{})
// State implements plan.Resource.
func (ka *KubectlApply) State() plan.State {
return toState(ka)
return resource.ToState(ka)
}
func (ka *KubectlApply) content() ([]byte, error) {
......@@ -157,7 +158,7 @@ func RunKubectlApply(r plan.Runner, args KubectlApplyArgs, fname string) error {
// Run kubectl wait, if requested.
if args.WaitCondition != "" {
cmd := fmt.Sprintf("kubectl wait --for=%q -f %q", args.WaitCondition, path)
if _, err := r.RunCommand(withoutProxy(cmd), nil); err != nil {
if _, err := r.RunCommand(resource.WithoutProxy(cmd), nil); err != nil {
return errors.Wrap(err, "kubectl wait")
}
}
......@@ -169,7 +170,7 @@ func RunKubectlApply(r plan.Runner, args KubectlApplyArgs, fname string) error {
func RunKubectlRemoteApply(remoteURL string, runner plan.Runner) error {
cmd := fmt.Sprintf("kubectl apply -f %q", remoteURL)
if stdouterr, err := runner.RunCommand(withoutProxy(cmd), nil); err != nil {
if stdouterr, err := runner.RunCommand(resource.WithoutProxy(cmd), nil); err != nil {
log.WithField("stdouterr", stdouterr).WithField("URL", remoteURL).Debug("failed to apply Kubernetes manifest")
return errors.Wrapf(err, "failed to apply manifest %s; output %s", remoteURL, stdouterr)
}
......
......@@ -6,12 +6,13 @@ import (
"time"
"github.com/pkg/errors"
"github.com/weaveworks/wksctl/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
)
// KubectlWait waits for an object to reach a required state
type KubectlWait struct {
base
resource.Base
// Namespace specifies the namespace in which to search for the object being waited on
WaitNamespace string `structs:"namespace"`
......@@ -29,7 +30,7 @@ var _ plan.Resource = plan.RegisterResource(&KubectlWait{})
// State implements plan.Resource.
func (kw *KubectlWait) State() plan.State {
return toState(kw)
return resource.ToState(kw)
}
// Apply performs a "kubectl wait" as specified in the receiver.
......@@ -64,7 +65,7 @@ func kubectlWait(r plan.Runner, args kubectlWaitArgs) error {
// Assume the objects to wait for should/will exist. Don't start the timeout until they are present
for {
cmd := fmt.Sprintf("kubectl get %q %s%s", args.WaitType, waitOn(args), waitNamespace(args))
output, err := r.RunCommand(withoutProxy(cmd), nil)
output, err := r.RunCommand(resource.WithoutProxy(cmd), nil)
if err != nil || strings.Contains(output, "No resources found") {
time.Sleep(500 * time.Millisecond)
} else {
......@@ -73,7 +74,7 @@ func kubectlWait(r plan.Runner, args kubectlWaitArgs) error {
}
cmd := fmt.Sprintf("kubectl wait %q --for=%q%s%s%s",
args.WaitType, args.WaitCondition, waitOn(args), waitTimeout(args), waitNamespace(args))
if _, err := r.RunCommand(withoutProxy(cmd), nil); err != nil {
if _, err := r.RunCommand(resource.WithoutProxy(cmd), nil); err != nil {
return errors.Wrap(err, "kubectl wait")
}
return nil
......
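The hunk above also documents the KubectlWait strategy: assume the object will eventually exist, poll until it shows up, and only then hand the condition off to "kubectl wait". A condensed sketch of that flow; the helper name waitForObject and its signature are hypothetical, while the command strings and WithoutProxy usage mirror the diff.

```go
import (
	"fmt"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan"
	"github.com/weaveworks/cluster-api-provider-existinginfra/pkg/plan/resource"
)

// waitForObject polls until the object exists, then delegates the real
// condition wait to "kubectl wait" (illustrative condensation of kubectlWait).
func waitForObject(r plan.Runner, kind, name, condition string) error {
	for {
		out, err := r.RunCommand(resource.WithoutProxy(fmt.Sprintf("kubectl get %q %s", kind, name)), nil)
		if err != nil || strings.Contains(out, "No resources found") {
			time.Sleep(500 * time.Millisecond)
			continue
		}
		break
	}
	cmd := fmt.Sprintf("kubectl wait %q --for=%q %s", kind, condition, name)
	if _, err := r.RunCommand(resource.WithoutProxy(cmd), nil); err != nil {
		return errors.Wrap(err, "kubectl wait")
	}
	return nil
}
```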