Commit 10ee4724 authored by Darren Shepherd's avatar Darren Shepherd
Browse files

Mutually assured destruction of v1 and v3.Cluster

parent 84bf5347
Showing with 312 additions and 175 deletions
+312 -175
......@@ -23,13 +23,12 @@ type ClusterSpec struct {
ClusterAPIConfig *ClusterAPIConfig `json:"clusterAPIConfig,omitempty"`
RKEConfig *RKEConfig `json:"rkeConfig,omitempty"`
ReferencedConfig *ReferencedConfig `json:"referencedConfig,omitempty"`
RancherValues rkev1.GenericMap `json:"rancherValues,omitempty" wrangler:"nullable"`
AgentEnvVars []corev1.EnvVar `json:"agentEnvVars,omitempty"`
DefaultPodSecurityPolicyTemplateName string `json:"defaultPodSecurityPolicyTemplateName,omitempty" norman:"type=reference[podSecurityPolicyTemplate]"`
DefaultClusterRoleForProjectMembers string `json:"defaultClusterRoleForProjectMembers,omitempty" norman:"type=reference[roleTemplate]"`
EnableNetworkPolicy *bool `json:"enableNetworkPolicy" norman:"default=false"`
EnableNetworkPolicy *bool `json:"enableNetworkPolicy,omitempty" norman:"default=false"`
}
type ClusterStatus struct {
......@@ -49,7 +48,3 @@ type ImportedConfig struct {
type ClusterAPIConfig struct {
ClusterName string `json:"clusterName,omitempty"`
}
type ReferencedConfig struct {
ManagementClusterName string `json:"managementClusterName,omitempty"`
}
......@@ -94,11 +94,8 @@ func main() {
},
"fleet.cattle.io": {
Types: []interface{}{
fleet.GitRepo{},
fleet.Bundle{},
fleet.Cluster{},
fleet.ClusterGroup{},
fleet.ClusterRegistrationToken{},
},
},
"rke.cattle.io": {
......
......@@ -5,8 +5,6 @@ import (
"github.com/rancher/rancher/pkg/controllers/dashboard/apiservice"
"github.com/rancher/rancher/pkg/controllers/dashboard/fleetcharts"
"github.com/rancher/rancher/pkg/controllers/dashboard/fleetcluster"
"github.com/rancher/rancher/pkg/controllers/dashboard/fleetworkspace"
"github.com/rancher/rancher/pkg/controllers/dashboard/helm"
"github.com/rancher/rancher/pkg/controllers/dashboard/kubernetesprovider"
"github.com/rancher/rancher/pkg/controllers/dashboard/scaleavailable"
......@@ -49,10 +47,3 @@ func Register(ctx context.Context, wrangler *wrangler.Context) error {
return nil
}
func RegisterFleet(ctx context.Context, wrangler *wrangler.Context) {
if features.Fleet.Enabled() {
fleetcluster.Register(ctx, wrangler)
fleetworkspace.Register(ctx, wrangler)
}
}
......@@ -4,11 +4,12 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"regexp"
"github.com/rancher/norman/types/convert"
v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
v1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
capicontrollers "github.com/rancher/rancher/pkg/generated/controllers/cluster.x-k8s.io/v1alpha4"
mgmtcontrollers "github.com/rancher/rancher/pkg/generated/controllers/management.cattle.io/v3"
rocontrollers "github.com/rancher/rancher/pkg/generated/controllers/provisioning.cattle.io/v1"
"github.com/rancher/rancher/pkg/provisioningv2/kubeconfig"
......@@ -20,6 +21,7 @@ import (
"github.com/rancher/wrangler/pkg/kstatus"
"github.com/rancher/wrangler/pkg/name"
"github.com/rancher/wrangler/pkg/relatedresource"
"github.com/rancher/wrangler/pkg/yaml"
apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
......@@ -27,9 +29,12 @@ import (
)
const (
ByCluster = "by-cluster"
creatorIDAnn = "field.cattle.io/creatorId"
managedAnnotation = "provisioning.cattle.io/managed"
ByCluster = "by-cluster"
creatorIDAnn = "field.cattle.io/creatorId"
)
var (
mgmtNameRegexp = regexp.MustCompile("c-[a-z0-9]{5}|local")
)
type handler struct {
......@@ -40,9 +45,11 @@ type handler struct {
clusters rocontrollers.ClusterController
clusterCache rocontrollers.ClusterCache
secretCache corecontrollers.SecretCache
settings mgmtcontrollers.SettingCache
kubeconfigManager *kubeconfig.Manager
apply apply.Apply
capiClusters capicontrollers.ClusterCache
capiMachines capicontrollers.MachineCache
}
func Register(
......@@ -53,17 +60,25 @@ func Register(
mgmtClusters: clients.Mgmt.Cluster(),
clusterTokenCache: clients.Mgmt.ClusterRegistrationToken().Cache(),
clusterTokens: clients.Mgmt.ClusterRegistrationToken(),
settings: clients.Mgmt.Setting().Cache(),
clusters: clients.Provisioning.Cluster(),
clusterCache: clients.Provisioning.Cluster().Cache(),
secretCache: clients.Core.Secret().Cache(),
capiClusters: clients.CAPI.Cluster().Cache(),
capiMachines: clients.CAPI.Machine().Cache(),
kubeconfigManager: kubeconfig.New(clients),
apply: clients.Apply.WithCacheTypes(
clients.Provisioning.Cluster(),
clients.Mgmt.Cluster()),
}
clients.Provisioning.Cluster().OnChange(ctx, "cluster-label", h.addLabel)
mgmtcontrollers.RegisterClusterGeneratingHandler(ctx,
clients.Mgmt.Cluster(),
clients.Apply.WithCacheTypes(clients.Provisioning.Cluster()),
"",
"provisioning-cluster-create",
h.generateProvisioning,
nil)
rocontrollers.RegisterClusterGeneratingHandler(ctx,
clients.Provisioning.Cluster(),
clients.Apply.WithCacheTypes(clients.Mgmt.Cluster(),
......@@ -78,13 +93,16 @@ func Register(
AllowClusterScoped: true,
},
)
clients.Mgmt.Cluster().OnChange(ctx, "cluster-watch", h.createToken)
clusterCache := clients.Provisioning.Cluster().Cache()
relatedresource.Watch(ctx, "cluster-watch", h.clusterWatch,
clients.Provisioning.Cluster(), clients.Mgmt.Cluster())
clusterCache.AddIndexer(ByCluster, byClusterIndex)
clients.Provisioning.Cluster().Informer().GetIndexer().ListKeys()
clients.Mgmt.Cluster().OnRemove(ctx, "mgmt-cluster-remove", h.OnMgmtClusterRemove)
clients.Provisioning.Cluster().OnRemove(ctx, "provisioning-cluster-remove", h.OnClusterRemove)
}
func byClusterIndex(obj *v1.Cluster) ([]string, error) {
......@@ -94,22 +112,6 @@ func byClusterIndex(obj *v1.Cluster) ([]string, error) {
return []string{obj.Status.ClusterName}, nil
}
// addLabel ensures each provisioning cluster carries a "metadata.name"
// label equal to its own object name, updating the cluster when missing.
func (h *handler) addLabel(_ string, cluster *v1.Cluster) (*v1.Cluster, error) {
	if cluster == nil {
		return nil, nil
	}
	// Already labeled correctly; nothing to write.
	if cluster.Labels["metadata.name"] == cluster.Name {
		return cluster, nil
	}
	updated := cluster.DeepCopy()
	if updated.Labels == nil {
		updated.Labels = map[string]string{}
	}
	updated.Labels["metadata.name"] = updated.Name
	return h.clusters.Update(updated)
}
func (h *handler) clusterWatch(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) {
cluster, ok := obj.(*v3.Cluster)
if !ok {
......@@ -128,17 +130,32 @@ func (h *handler) clusterWatch(namespace, name string, obj runtime.Object) ([]re
}, nil
}
func (h *handler) generateCluster(cluster *v1.Cluster, status v1.ClusterStatus) ([]runtime.Object, v1.ClusterStatus, error) {
if cluster.Spec.ReferencedConfig != nil {
return h.referenceCluster(cluster, status)
// isLegacyCluster reports whether the given object (a *v3.Cluster or
// *v1.Cluster) has a legacy management-style name ("c-xxxxx" or "local"),
// as matched by mgmtNameRegexp. Any other type yields false.
func (h *handler) isLegacyCluster(cluster interface{}) bool {
	switch c := cluster.(type) {
	case *v3.Cluster:
		return mgmtNameRegexp.MatchString(c.Name)
	case *v1.Cluster:
		return mgmtNameRegexp.MatchString(c.Name)
	default:
		return false
	}
}
if owningCluster, err := h.apply.FindOwner(cluster); errors.Is(err, apply.ErrOwnerNotFound) || errors.Is(err, apply.ErrNoInformerFound) {
} else if _, ok := owningCluster.(*v3.Cluster); err == nil && ok {
// Do not generate v3.Cluster if this cluster was generated from a v3.Cluster
func (h *handler) generateProvisioning(cluster *v3.Cluster, status v3.ClusterStatus) ([]runtime.Object, v3.ClusterStatus, error) {
if !h.isLegacyCluster(cluster) || cluster.Spec.FleetWorkspaceName == "" {
return nil, status, nil
}
return []runtime.Object{
&v1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: cluster.Name,
Namespace: cluster.Spec.FleetWorkspaceName,
Labels: yaml.CleanAnnotationsForExport(cluster.Labels),
Annotations: yaml.CleanAnnotationsForExport(cluster.Annotations),
},
},
}, status, nil
}
func (h *handler) generateCluster(cluster *v1.Cluster, status v1.ClusterStatus) ([]runtime.Object, v1.ClusterStatus, error) {
switch {
case cluster.Spec.ClusterAPIConfig != nil:
return h.createClusterAndDeployAgent(cluster, status)
......@@ -165,7 +182,7 @@ func NormalizeCluster(cluster *v3.Cluster) (runtime.Object, error) {
}
func (h *handler) createToken(_ string, cluster *v3.Cluster) (*v3.Cluster, error) {
if cluster == nil || cluster.Annotations[managedAnnotation] != "true" {
if cluster == nil {
return cluster, nil
}
_, err := h.clusterTokenCache.Get(cluster.Name, "default-token")
......@@ -185,6 +202,22 @@ func (h *handler) createToken(_ string, cluster *v3.Cluster) (*v3.Cluster, error
}
// createCluster produces the objects backing a provisioning cluster. For a
// legacy-named cluster the v3.Cluster already exists, so only status is
// synced from it; otherwise a brand-new management cluster is generated.
func (h *handler) createCluster(cluster *v1.Cluster, status v1.ClusterStatus, spec v3.ClusterSpec) ([]runtime.Object, v1.ClusterStatus, error) {
	if !h.isLegacyCluster(cluster) {
		return h.createNewCluster(cluster, status, spec)
	}
	// Legacy cluster: adopt the existing v3.Cluster of the same name.
	mgmtCluster, err := h.mgmtClusterCache.Get(cluster.Name)
	if err != nil {
		return nil, status, err
	}
	return h.updateStatus(nil, cluster, status, mgmtCluster)
}
// mgmtClusterName derives a stable management-cluster name of the form
// "c-m-<8 hex chars>" from the provisioning cluster's namespace and name,
// using a SHA-256 digest so the result is deterministic and collision-resistant.
func mgmtClusterName(clusterNamespace, clusterName string) string {
	sum := sha256.Sum256([]byte(clusterNamespace + "/" + clusterName))
	return name.SafeConcatName("c", "m", hex.EncodeToString(sum[:])[:8])
}
func (h *handler) createNewCluster(cluster *v1.Cluster, status v1.ClusterStatus, spec v3.ClusterSpec) ([]runtime.Object, v1.ClusterStatus, error) {
spec.DisplayName = cluster.Name
spec.Description = cluster.Annotations["field.cattle.io/description"]
spec.FleetWorkspaceName = cluster.Namespace
......@@ -201,10 +234,9 @@ func (h *handler) createCluster(cluster *v1.Cluster, status v1.ClusterStatus, sp
}
}
hash := sha256.Sum256([]byte(cluster.Namespace + "/" + cluster.Name))
newCluster := &v3.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: name.SafeConcatName("c", "m", hex.EncodeToString(hash[:])[:8]),
Name: mgmtClusterName(cluster.Namespace, cluster.Name),
Labels: cluster.Labels,
Annotations: map[string]string{},
},
......@@ -221,7 +253,6 @@ func (h *handler) createCluster(cluster *v1.Cluster, status v1.ClusterStatus, sp
}
newCluster.Annotations[creatorIDAnn] = userName
newCluster.Annotations[managedAnnotation] = "true"
normalizedCluster, err := NormalizeCluster(newCluster)
if err != nil {
......@@ -260,7 +291,9 @@ func (h *handler) updateStatus(objs []runtime.Object, cluster *v1.Cluster, statu
return nil, status, err
}
if secret != nil {
objs = append(objs, secret)
if secret.UID == "" {
objs = append(objs, secret)
}
status.ClientSecretName = secret.Name
ctrb, err := h.kubeconfigManager.GetCTRBForAdmin(cluster, status)
......
......@@ -14,7 +14,6 @@ import (
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/yaml"
apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd"
......@@ -56,15 +55,6 @@ func (h *handler) deployAgent(cluster *v1.Cluster, status v1.ClusterStatus) (boo
}
if len(tokens) == 0 {
_, err := h.clusterTokens.Create(&v3.ClusterRegistrationToken{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "import-",
Namespace: status.ClusterName,
},
Spec: v3.ClusterRegistrationTokenSpec{
ClusterName: status.ClusterName,
},
})
h.clusters.EnqueueAfter(cluster.Namespace, cluster.Name, 2*time.Second)
return false, err
}
......
package cluster
import (
"fmt"
v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
v1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
const (
claimedLabelNamespace = "provisioning.cattle.io/claimed-by-namespace"
claimedLabelName = "provisioning.cattle.io/claimed-by-name"
)
// referenceCluster resolves (claiming if necessary) the management cluster
// referenced by the provisioning cluster and syncs status from it.
func (h *handler) referenceCluster(cluster *v1.Cluster, status v1.ClusterStatus) ([]runtime.Object, v1.ClusterStatus, error) {
	mgmtCluster, err := h.claimCluster(cluster, status)
	if err != nil {
		return nil, status, err
	}
	return h.updateStatus(nil, cluster, status, mgmtCluster)
}
// claimCluster resolves the v3 (management) cluster backing a referenced
// provisioning cluster, claiming it with labels on first use.
//
// Resolution order:
//  1. If status already records a cluster name, fetch it from the cache.
//  2. Otherwise look for a management cluster already labeled as claimed
//     by this provisioning cluster's namespace/name.
//  3. Otherwise claim the cluster named in spec.referencedConfig by writing
//     the claimed-by labels, refusing if another owner already claimed it.
func (h *handler) claimCluster(cluster *v1.Cluster, status v1.ClusterStatus) (*v3.Cluster, error) {
	// Fast path: the binding was already recorded in status.
	if status.ClusterName != "" {
		return h.mgmtClusterCache.Get(status.ClusterName)
	}
	if cluster.Spec.ReferencedConfig.ManagementClusterName == "" {
		return nil, fmt.Errorf("missing managementClusterName for referenced cluster %s/%s", cluster.Namespace, cluster.Name)
	}
	// Look for management clusters already labeled as claimed by us.
	claimed, err := h.mgmtClusterCache.List(labels.SelectorFromSet(map[string]string{
		claimedLabelName:      cluster.Name,
		claimedLabelNamespace: cluster.Namespace,
	}))
	if err != nil {
		return nil, err
	}
	// Multiple claims are ambiguous; an operator must remove extra labels.
	if len(claimed) > 1 {
		return nil, fmt.Errorf("more than one (%d) cluster is claimed by %s/%s remove %s and %s label on the undesired clusters",
			len(claimed), cluster.Namespace, cluster.Name, claimedLabelNamespace, claimedLabelName)
	}
	if len(claimed) == 1 {
		return claimed[0], nil
	}
	// No existing claim: claim the explicitly referenced cluster.
	available, err := h.mgmtClusterCache.Get(cluster.Spec.ReferencedConfig.ManagementClusterName)
	if err != nil {
		return nil, err
	}
	// Refuse to steal a cluster that some other owner has claimed.
	if available.Labels[claimedLabelName] != "" || available.Labels[claimedLabelNamespace] != "" {
		return nil, fmt.Errorf("cluster %s/%s is already claimed by %s/%s, can not claim for %s/%s",
			available.Namespace, available.Name,
			available.Labels[claimedLabelNamespace], available.Labels[claimedLabelName],
			cluster.Namespace, cluster.Name)
	}
	// Record the claim by labeling a copy of the management cluster.
	updated := available.DeepCopy()
	if updated.Labels == nil {
		updated.Labels = map[string]string{}
	}
	updated.Labels[claimedLabelName] = cluster.Name
	updated.Labels[claimedLabelNamespace] = cluster.Namespace
	return h.mgmtClusters.Update(updated)
}
package cluster
import (
"fmt"
"sort"
"time"
v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
v1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
"github.com/rancher/wrangler/pkg/condition"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
)
const (
capiClusterLabel = "cluster.x-k8s.io/cluster-name"
)
var (
Removed = condition.Cond("Removed")
)
// OnMgmtClusterRemove deletes every provisioning cluster indexed against a
// management cluster when that management cluster is removed (mutual
// destruction of the v1/v3 pair).
func (h *handler) OnMgmtClusterRemove(key string, cluster *v3.Cluster) (*v3.Cluster, error) {
	owned, err := h.clusterCache.GetByIndex(ByCluster, cluster.Name)
	if err != nil {
		return nil, err
	}
	for _, owner := range owned {
		if delErr := h.clusters.Delete(owner.Namespace, owner.Name, nil); delErr != nil {
			return nil, delErr
		}
	}
	return cluster, nil
}
// updateClusterStatus writes the given status to the cluster if it differs
// from the current one, returning previousErr unchanged so callers can
// propagate the error that drove the status change.
func (h *handler) updateClusterStatus(cluster *v1.Cluster, status v1.ClusterStatus, previousErr error) (*v1.Cluster, error) {
	// Skip the API call when nothing changed.
	if equality.Semantic.DeepEqual(status, cluster.Status) {
		return cluster, previousErr
	}
	updated := cluster.DeepCopy()
	updated.Status = status
	updated, err := h.clusters.UpdateStatus(updated)
	if err != nil {
		return updated, err
	}
	return updated, previousErr
}
// OnClusterRemove drives deletion of the resources backing a provisioning
// cluster, reflecting progress in the "Removed" condition. While dependents
// are still deleting it requeues the cluster and reports a waiting message.
func (h *handler) OnClusterRemove(_ string, cluster *v1.Cluster) (*v1.Cluster, error) {
	status := cluster.Status.DeepCopy()
	message, err := h.doClusterRemove(cluster)
	switch {
	case err != nil:
		Removed.SetError(status, "", err)
		return h.updateClusterStatus(cluster, *status, err)
	case message == "":
		// Everything is gone; mark removal complete.
		Removed.SetStatusBool(status, true)
		Removed.Reason(status, "")
		Removed.Message(status, "")
	default:
		// Still waiting on dependents; poll again shortly.
		Removed.SetStatus(status, "Unknown")
		Removed.Reason(status, "Waiting")
		Removed.Message(status, message)
		h.clusters.EnqueueAfter(cluster.Namespace, cluster.Name, 5*time.Second)
	}
	return h.updateClusterStatus(cluster, *status, nil)
}
// doClusterRemove tears down everything behind a provisioning cluster: the
// v3 management cluster, CAPI machines, and the CAPI cluster. It returns a
// non-empty waiting message while any of those are still being deleted, and
// ("", nil) once all are gone.
func (h *handler) doClusterRemove(cluster *v1.Cluster) (string, error) {
	if mgmtName := cluster.Status.ClusterName; mgmtName != "" {
		// Issue the delete; a NotFound just means it is already gone.
		if err := h.mgmtClusters.Delete(mgmtName, nil); err != nil && !apierrors.IsNotFound(err) {
			return "", err
		}
		// Wait until the management cluster disappears from the cache.
		if _, err := h.mgmtClusterCache.Get(mgmtName); !apierrors.IsNotFound(err) {
			return fmt.Sprintf("waiting for cluster [%s] to delete", mgmtName), nil
		}
	}

	selector := labels.SelectorFromSet(labels.Set{capiClusterLabel: cluster.Name})
	machines, err := h.capiMachines.List(cluster.Namespace, selector)
	if err != nil {
		return "", err
	}
	// Sort so the reported machine is deterministic across reconciles.
	sort.Slice(machines, func(i, j int) bool {
		return machines[i].Name < machines[j].Name
	})
	if len(machines) > 0 {
		return fmt.Sprintf("waiting for machine [%s] to delete", machines[0].Name), nil
	}

	if _, err := h.capiClusters.Get(cluster.Namespace, cluster.Name); apierrors.IsNotFound(err) {
		return "", nil
	}
	return fmt.Sprintf("waiting for cluster-api cluster [%s] to delete", cluster.Name), nil
}
......@@ -4,6 +4,8 @@ import (
"context"
"github.com/rancher/rancher/pkg/controllers/provisioningv2/cluster"
"github.com/rancher/rancher/pkg/controllers/provisioningv2/fleetcluster"
"github.com/rancher/rancher/pkg/controllers/provisioningv2/fleetworkspace"
"github.com/rancher/rancher/pkg/controllers/provisioningv2/managedchart"
"github.com/rancher/rancher/pkg/controllers/provisioningv2/rke2/bootstrap"
"github.com/rancher/rancher/pkg/controllers/provisioningv2/rke2/dynamicschema"
......@@ -30,6 +32,8 @@ func Register(ctx context.Context, clients *wrangler.Context) error {
if features.Fleet.Enabled() {
managedchart.Register(ctx, clients)
fleetcluster.Register(ctx, clients)
fleetworkspace.Register(ctx, clients)
}
if features.RKE2.Enabled() {
......
......@@ -2,40 +2,37 @@ package fleetcluster
import (
"context"
"encoding/json"
"errors"
jsonpatch "github.com/evanphx/json-patch"
fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1"
mgmt "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
v1 "github.com/rancher/rancher/pkg/apis/provisioning.cattle.io/v1"
mgmtcontrollers "github.com/rancher/rancher/pkg/generated/controllers/management.cattle.io/v3"
rocontrollers "github.com/rancher/rancher/pkg/generated/controllers/provisioning.cattle.io/v1"
"github.com/rancher/rancher/pkg/settings"
"github.com/rancher/rancher/pkg/wrangler"
"github.com/rancher/wrangler/pkg/apply"
"github.com/rancher/wrangler/pkg/generic"
"github.com/rancher/wrangler/pkg/relatedresource"
"github.com/rancher/wrangler/pkg/yaml"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation"
)
type handler struct {
clusters mgmtcontrollers.ClusterClient
apply apply.Apply
clusters mgmtcontrollers.ClusterClient
clustersCache mgmtcontrollers.ClusterCache
apply apply.Apply
}
func Register(ctx context.Context, clients *wrangler.Context) {
h := &handler{
clusters: clients.Mgmt.Cluster(),
apply: clients.Apply.WithCacheTypes(clients.Provisioning.Cluster()),
clusters: clients.Mgmt.Cluster(),
clustersCache: clients.Mgmt.Cluster().Cache(),
apply: clients.Apply.WithCacheTypes(clients.Provisioning.Cluster()),
}
clients.Mgmt.Cluster().OnChange(ctx, "fleet-cluster-label", h.addLabel)
mgmtcontrollers.RegisterClusterGeneratingHandler(ctx,
clients.Mgmt.Cluster(),
rocontrollers.RegisterClusterGeneratingHandler(ctx,
clients.Provisioning.Cluster(),
clients.Apply.
WithCacheTypes(clients.Fleet.Cluster(),
clients.Provisioning.Cluster()),
......@@ -45,26 +42,10 @@ func Register(ctx context.Context, clients *wrangler.Context) {
nil,
)
relatedresource.WatchClusterScoped(ctx, "fleet-cluster-resolver", h.clusterToCluster,
clients.Mgmt.Cluster(), clients.Provisioning.Cluster())
clients.Mgmt.Cluster().OnChange(ctx, "fleet-cluster-assign", h.assignWorkspace)
}
// clusterToCluster maps an object back to the provisioning cluster that owns
// it (per apply ownership), so the owner can be re-enqueued. Objects without
// a resolvable v1.Cluster owner produce no keys.
func (h *handler) clusterToCluster(namespace, name string, obj runtime.Object) ([]relatedresource.Key, error) {
	owner, err := h.apply.FindOwner(obj)
	if err != nil {
		// Unresolvable ownership is not an error for enqueue purposes.
		return nil, nil
	}
	rCluster, ok := owner.(*v1.Cluster)
	if !ok {
		return nil, nil
	}
	return []relatedresource.Key{{
		Namespace: rCluster.Namespace,
		Name:      rCluster.Name,
	}}, nil
}
func (h *handler) addLabel(key string, cluster *mgmt.Cluster) (*mgmt.Cluster, error) {
func (h *handler) assignWorkspace(key string, cluster *mgmt.Cluster) (*mgmt.Cluster, error) {
if cluster == nil {
return cluster, nil
}
......@@ -72,11 +53,7 @@ func (h *handler) addLabel(key string, cluster *mgmt.Cluster) (*mgmt.Cluster, er
if cluster.Spec.Internal && cluster.Spec.FleetWorkspaceName == "" {
newCluster := cluster.DeepCopy()
newCluster.Spec.FleetWorkspaceName = "fleet-local"
patch, err := generatePatch(cluster, newCluster)
if err != nil {
return cluster, err
}
return h.clusters.Patch(cluster.Name, types.MergePatchType, patch)
return h.clusters.Update(newCluster)
} else if cluster.Spec.Internal {
return cluster, nil
}
......@@ -89,104 +66,41 @@ func (h *handler) addLabel(key string, cluster *mgmt.Cluster) (*mgmt.Cluster, er
newCluster := cluster.DeepCopy()
newCluster.Spec.FleetWorkspaceName = def
patch, err := generatePatch(cluster, newCluster)
if err != nil {
return cluster, err
}
cluster, err = h.clusters.Patch(cluster.Name, types.MergePatchType, patch)
if err != nil {
return nil, err
}
}
if cluster.Spec.FleetWorkspaceName == "" {
return cluster, nil
return h.clusters.Update(newCluster)
}
return cluster, nil
}
func (h *handler) createCluster(mgmtCluster *mgmt.Cluster, status mgmt.ClusterStatus) ([]runtime.Object, mgmt.ClusterStatus, error) {
if mgmtCluster.Spec.FleetWorkspaceName == "" {
func (h *handler) createCluster(cluster *v1.Cluster, status v1.ClusterStatus) ([]runtime.Object, v1.ClusterStatus, error) {
if status.ClusterName == "" || status.ClientSecretName == "" {
return nil, status, nil
}
if !mgmt.ClusterConditionReady.IsTrue(mgmtCluster) {
return nil, status, generic.ErrSkip
mgmtCluster, err := h.clustersCache.Get(status.ClusterName)
if err != nil {
return nil, status, err
}
var (
secretName = mgmtCluster.Name + "-kubeconfig"
fleetClusterName = mgmtCluster.Name
rClusterName = mgmtCluster.Name
createCluster = true
objs []runtime.Object
)
if owningCluster, err := h.apply.FindOwner(mgmtCluster); errors.Is(err, apply.ErrOwnerNotFound) || errors.Is(err, apply.ErrNoInformerFound) {
} else if err != nil {
return nil, status, err
} else if rCluster, ok := owningCluster.(*v1.Cluster); ok {
if rCluster.Status.ClientSecretName == "" {
return nil, status, generic.ErrSkip
}
createCluster = false
fleetClusterName = rCluster.Name
rClusterName = rCluster.Name
secretName = rCluster.Status.ClientSecretName
if !mgmt.ClusterConditionReady.IsTrue(mgmtCluster) {
return nil, status, generic.ErrSkip
}
labels := yaml.CleanAnnotationsForExport(mgmtCluster.Labels)
labels["management.cattle.io/cluster-name"] = mgmtCluster.Name
labels["metadata.name"] = rClusterName
if errs := validation.IsValidLabelValue(mgmtCluster.Spec.DisplayName); len(errs) == 0 {
labels["management.cattle.io/cluster-display-name"] = mgmtCluster.Spec.DisplayName
}
if createCluster {
objs = append(objs, &v1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: rClusterName,
Namespace: mgmtCluster.Spec.FleetWorkspaceName,
Labels: labels,
},
Spec: v1.ClusterSpec{
ReferencedConfig: &v1.ReferencedConfig{
ManagementClusterName: mgmtCluster.Name,
},
AgentEnvVars: mgmtCluster.Spec.AgentEnvVars,
DefaultPodSecurityPolicyTemplateName: mgmtCluster.Spec.DefaultPodSecurityPolicyTemplateName,
DefaultClusterRoleForProjectMembers: mgmtCluster.Spec.DefaultClusterRoleForProjectMembers,
EnableNetworkPolicy: mgmtCluster.Spec.EnableNetworkPolicy,
},
})
}
objs = append(objs, &fleet.Cluster{
return []runtime.Object{&fleet.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: fleetClusterName,
Namespace: mgmtCluster.Spec.FleetWorkspaceName,
Name: cluster.Name,
Namespace: cluster.Namespace,
Labels: labels,
},
Spec: fleet.ClusterSpec{
KubeConfigSecret: secretName,
KubeConfigSecret: status.ClientSecretName,
AgentEnvVars: mgmtCluster.Spec.AgentEnvVars,
},
})
return objs, status, nil
}
func generatePatch(old, new *mgmt.Cluster) ([]byte, error) {
oldData, err := json.Marshal(old)
if err != nil {
return nil, err
}
newData, err := json.Marshal(new)
if err != nil {
return nil, err
}
return jsonpatch.CreateMergePatch(oldData, newData)
}}, status, nil
}
......@@ -54,8 +54,8 @@ func Register(ctx context.Context, clients *wrangler.Context) {
}
return obj, h.onFleetObject(obj)
})
clients.Fleet.GitRepo().OnChange(ctx, "workspace-backport",
func(s string, obj *fleet.GitRepo) (*fleet.GitRepo, error) {
clients.Fleet.Bundle().OnChange(ctx, "workspace-backport",
func(s string, obj *fleet.Bundle) (*fleet.Bundle, error) {
if obj == nil {
return nil, nil
}
......
......@@ -134,15 +134,9 @@ func (h *handler) OnChange(cluster *rancherv1.Cluster, status rancherv1.ClusterS
},
Targets: []v1alpha1.BundleTarget{
{
ClusterName: cluster.Name,
ClusterSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "metadata.name",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
cluster.Name,
},
},
{
Key: "provisioning.cattle.io/unmanaged-system-agent",
Operator: metav1.LabelSelectorOpDoesNotExist,
......
......@@ -35,15 +35,9 @@ func (h *handler) OnChangeInstallSUC(cluster *rancherv1.Cluster, status rancherv
},
Targets: []v1alpha1.BundleTarget{
{
ClusterName: cluster.Name,
ClusterSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "metadata.name",
Operator: metav1.LabelSelectorOpIn,
Values: []string{
cluster.Name,
},
},
{
Key: "provisioning.cattle.io/unmanaged-system-agent",
Operator: metav1.LabelSelectorOpDoesNotExist,
......
......@@ -157,7 +157,7 @@ func (h *handler) OnRancherClusterChange(obj *rancherv1.Cluster, status rancherv
return nil, status, err
}
objs, err := objects(obj, h.dynamic, h.dynamicSchema)
objs, err := objects(obj, h.dynamic, h.dynamicSchema, h.secretCache)
return objs, status, err
}
......
......@@ -12,6 +12,7 @@ import (
"github.com/rancher/rancher/pkg/provisioningv2/rke2/planner"
"github.com/rancher/wrangler/pkg/data"
"github.com/rancher/wrangler/pkg/data/convert"
v1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/gvk"
"github.com/rancher/wrangler/pkg/name"
corev1 "k8s.io/api/core/v1"
......@@ -32,7 +33,7 @@ func getInfraRef(rkeCluster *rkev1.RKECluster) *corev1.ObjectReference {
return infraRef
}
func objects(cluster *rancherv1.Cluster, dynamic *dynamic.Controller, dynamicSchema mgmtcontroller.DynamicSchemaCache) (result []runtime.Object, _ error) {
func objects(cluster *rancherv1.Cluster, dynamic *dynamic.Controller, dynamicSchema mgmtcontroller.DynamicSchemaCache, secrets v1.SecretCache) (result []runtime.Object, _ error) {
infraRef := cluster.Spec.RKEConfig.InfrastructureRef
if infraRef == nil {
rkeCluster := rkeCluster(cluster)
......@@ -46,7 +47,7 @@ func objects(cluster *rancherv1.Cluster, dynamic *dynamic.Controller, dynamicSch
capiCluster := capiCluster(cluster, rkeControlPlane, infraRef)
result = append(result, capiCluster)
machineDeployments, err := machineDeployments(cluster, capiCluster, dynamic, dynamicSchema)
machineDeployments, err := machineDeployments(cluster, capiCluster, dynamic, dynamicSchema, secrets)
if err != nil {
return nil, err
}
......@@ -73,7 +74,7 @@ func pruneBySchema(kind string, data map[string]interface{}, dynamicSchema mgmtc
}
func toMachineTemplate(nodePoolName string, cluster *rancherv1.Cluster, nodePool rancherv1.RKENodePool,
dynamic *dynamic.Controller, dynamicSchema mgmtcontroller.DynamicSchemaCache) (runtime.Object, error) {
dynamic *dynamic.Controller, dynamicSchema mgmtcontroller.DynamicSchemaCache, secrets v1.SecretCache) (runtime.Object, error) {
apiVersion := nodePool.NodeConfig.APIVersion
kind := nodePool.NodeConfig.Kind
if apiVersion == "" {
......@@ -101,11 +102,17 @@ func toMachineTemplate(nodePoolName string, cluster *rancherv1.Cluster, nodePool
}
nodePoolData.Set("common", commonData)
if cluster.Spec.CloudCredentialSecretName != "" {
nodePoolData.SetNested(cluster.Spec.CloudCredentialSecretName, "common", "cloudCredentialSecretName")
}
secretName := cluster.Spec.CloudCredentialSecretName
if nodePool.CloudCredentialSecretName != "" {
nodePoolData.SetNested(nodePool.CloudCredentialSecretName, "common", "cloudCredentialSecretName")
secretName = nodePool.CloudCredentialSecretName
}
if secretName != "" {
_, err := secrets.Get(cluster.Namespace, secretName)
if err != nil {
return nil, err
}
nodePoolData.SetNested(secretName, "common", "cloudCredentialSecretName")
}
return &unstructured.Unstructured{
......@@ -126,7 +133,7 @@ func toMachineTemplate(nodePoolName string, cluster *rancherv1.Cluster, nodePool
}
func machineDeployments(cluster *rancherv1.Cluster, capiCluster *capi.Cluster, dynamic *dynamic.Controller,
dynamicSchema mgmtcontroller.DynamicSchemaCache) (result []runtime.Object, _ error) {
dynamicSchema mgmtcontroller.DynamicSchemaCache, secrets v1.SecretCache) (result []runtime.Object, _ error) {
bootstrapName := name.SafeConcatName(cluster.Name, "bootstrap", "template")
if len(cluster.Spec.RKEConfig.NodePools) > 0 {
......@@ -163,7 +170,7 @@ func machineDeployments(cluster *rancherv1.Cluster, capiCluster *capi.Cluster, d
)
if nodePool.NodeConfig.APIVersion == "" || nodePool.NodeConfig.APIVersion == "provisioning.cattle.io/v1" {
machineTemplate, err := toMachineTemplate(nodePoolName, cluster, nodePool, dynamic, dynamicSchema)
machineTemplate, err := toMachineTemplate(nodePoolName, cluster, nodePool, dynamic, dynamicSchema, secrets)
if err != nil {
return nil, err
}
......
......@@ -220,6 +220,8 @@ func (m *Manager) getKubeConfigData(clusterNamespace, clusterName, secretName, m
secret, err := m.secretCache.Get(clusterNamespace, secretName)
if err == nil {
return secret.Data, nil
} else if !apierror.IsNotFound(err) {
return nil, err
}
lockID := clusterNamespace + "/" + clusterName
......@@ -280,15 +282,15 @@ func (m *Manager) getKubeConfigData(clusterNamespace, clusterName, secretName, m
}
func (m *Manager) GetKubeConfig(cluster *v1.Cluster, status v1.ClusterStatus) (*corev1.Secret, error) {
var (
name = getKubeConfigSecretName(cluster.Name)
)
if cluster.Spec.ClusterAPIConfig != nil {
name = getKubeConfigSecretName(cluster.Spec.ClusterAPIConfig.ClusterName)
return m.secretCache.Get(cluster.Namespace, getKubeConfigSecretName(cluster.Spec.ClusterAPIConfig.ClusterName))
}
data, err := m.getKubeConfigData(cluster.Namespace, cluster.Name, name, status.ClusterName)
var (
secretName = getKubeConfigSecretName(cluster.Name)
)
data, err := m.getKubeConfigData(cluster.Namespace, cluster.Name, secretName, status.ClusterName)
if err != nil {
return nil, err
}
......@@ -296,7 +298,7 @@ func (m *Manager) GetKubeConfig(cluster *v1.Cluster, status v1.ClusterStatus) (*
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: cluster.Namespace,
Name: name,
Name: secretName,
},
Data: data,
}, nil
......
......@@ -198,15 +198,8 @@ func (r *Rancher) Start(ctx context.Context) error {
if err := dashboarddata.Add(ctx, r.Wrangler, localClusterEnabled(r.opts), r.opts.AddLocal == "false", r.opts.Embedded); err != nil {
return err
}
err := r.Wrangler.StartWithTransaction(ctx, func(ctx context.Context) error {
return dashboard.Register(ctx, r.Wrangler)
})
if err != nil {
return err
}
return r.Wrangler.StartWithTransaction(ctx, func(ctx context.Context) error {
dashboard.RegisterFleet(ctx, r.Wrangler)
return nil
return dashboard.Register(ctx, r.Wrangler)
})
})
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment