Unverified commit aa0f9a3a authored by Alexander Matyushentsev, committed by GitHub

Issue #1167 - Implement orphan resources support (#2103)

parent b85d3e59
Showing 1224 additions and 592 deletions (+1224 −592)
@@ -49,7 +49,7 @@
         "ApplicationService"
       ],
       "summary": "List returns list of applications",
-      "operationId": "ListMixin1",
+      "operationId": "ListMixin6",
       "parameters": [
         {
           "type": "string",
@@ -89,7 +89,7 @@
         "ApplicationService"
       ],
       "summary": "Create creates an application",
-      "operationId": "CreateMixin1",
+      "operationId": "CreateMixin6",
       "parameters": [
         {
           "name": "body",
@@ -116,7 +116,7 @@
         "ApplicationService"
       ],
       "summary": "Update updates an application",
-      "operationId": "UpdateMixin1",
+      "operationId": "UpdateMixin6",
       "parameters": [
         {
           "type": "string",
@@ -197,7 +197,7 @@
         "ApplicationService"
       ],
       "summary": "Get returns an application by name",
-      "operationId": "Get",
+      "operationId": "GetMixin6",
       "parameters": [
         {
           "type": "string",
@@ -238,7 +238,7 @@
         "ApplicationService"
       ],
       "summary": "Delete deletes an application",
-      "operationId": "DeleteMixin1",
+      "operationId": "DeleteMixin6",
      "parameters": [
         {
           "type": "string",
@@ -852,7 +852,7 @@
         "ClusterService"
       ],
       "summary": "List returns list of clusters",
-      "operationId": "ListMixin3",
+      "operationId": "List",
       "parameters": [
         {
           "type": "string",
@@ -874,7 +874,7 @@
         "ClusterService"
       ],
       "summary": "Create creates a cluster",
-      "operationId": "CreateMixin3",
+      "operationId": "Create",
       "parameters": [
         {
           "name": "body",
@@ -901,7 +901,7 @@
         "ClusterService"
       ],
       "summary": "Update updates a cluster",
-      "operationId": "UpdateMixin3",
+      "operationId": "Update",
       "parameters": [
         {
           "type": "string",
@@ -934,7 +934,7 @@
         "ClusterService"
       ],
       "summary": "Get returns a cluster by server address",
-      "operationId": "GetMixin3",
+      "operationId": "GetMixin1",
       "parameters": [
         {
           "type": "string",
@@ -957,7 +957,7 @@
         "ClusterService"
       ],
       "summary": "Delete deletes a cluster",
-      "operationId": "DeleteMixin3",
+      "operationId": "Delete",
       "parameters": [
         {
           "type": "string",
@@ -1239,7 +1239,7 @@
         "RepositoryService"
       ],
       "summary": "List returns list of repos",
-      "operationId": "List",
+      "operationId": "ListMixin2",
       "parameters": [
         {
           "type": "string",
@@ -1261,7 +1261,7 @@
         "RepositoryService"
       ],
       "summary": "Create creates a repo",
-      "operationId": "Create",
+      "operationId": "CreateMixin2",
       "parameters": [
         {
           "name": "body",
@@ -1288,7 +1288,7 @@
         "RepositoryService"
       ],
       "summary": "Update updates a repo",
-      "operationId": "Update",
+      "operationId": "UpdateMixin2",
       "parameters": [
         {
           "type": "string",
@@ -1321,7 +1321,7 @@
         "RepositoryService"
       ],
       "summary": "Delete deletes a repo",
-      "operationId": "Delete",
+      "operationId": "DeleteMixin2",
       "parameters": [
         {
           "type": "string",
@@ -1500,7 +1500,7 @@
         "SettingsService"
       ],
       "summary": "Get returns Argo CD settings",
-      "operationId": "GetMixin5",
+      "operationId": "Get",
       "responses": {
         "200": {
           "description": "(empty)",
@@ -1881,6 +1881,7 @@
         },
         "parameters": {
           "type": "array",
+          "title": "the output of `helm inspect values`",
          "items": {
             "$ref": "#/definitions/v1alpha1HelmParameter"
           }
@@ -1895,7 +1896,8 @@
           }
         },
         "values": {
-          "type": "string"
+          "type": "string",
+          "title": "the contents of values.yaml"
         }
       }
     },
@@ -2615,6 +2617,9 @@
             "$ref": "#/definitions/v1GroupKind"
           }
         },
+        "orphanedResources": {
+          "$ref": "#/definitions/v1alpha1OrphanedResourcesMonitorSettings"
+        },
         "roles": {
           "type": "array",
           "title": "Roles are user defined RBAC roles associated with this project",
@@ -2943,6 +2948,14 @@
       "title": "ApplicationTree holds nodes which belongs to the application",
       "properties": {
         "nodes": {
+          "description": "Nodes contains list of nodes which either directly managed by the application and children of directly managed nodes.",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1ResourceNode"
+          }
+        },
+        "orphanedNodes": {
+          "description": "OrphanedNodes contains if or orphaned nodes: nodes which are not managed by the app but in the same namespace. List is populated only if orphaned resources enabled in app project.",
           "type": "array",
           "items": {
             "$ref": "#/definitions/v1alpha1ResourceNode"
@@ -3205,6 +3218,17 @@
         }
       }
     },
+    "v1alpha1OrphanedResourcesMonitorSettings": {
+      "type": "object",
+      "title": "OrphanedResourcesMonitorSettings holds settings of orphaned resources monitoring",
+      "properties": {
+        "warn": {
+          "type": "boolean",
+          "format": "boolean",
+          "title": "Warn indicates if warning condition should be created for apps which have orphaned resources"
+        }
+      }
+    },
     "v1alpha1ProjectRole": {
       "type": "object",
       "title": "ProjectRole represents a role that has access to a project",
...
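The new v1alpha1OrphanedResourcesMonitorSettings definition above maps to a small Go type on the AppProject spec. Below is a minimal sketch of that type, assuming Warn is a pointer so that an empty `orphanedResources: {}` block enables monitoring with warnings on by default; the IsWarn() helper used later in the controller diff is reproduced under the same assumption (field tags and the exact default may differ from the real code).

package v1alpha1

// OrphanedResourcesMonitorSettings holds settings of orphaned resources monitoring
// (sketch of the assumed shape; the swagger/CRD additions in this commit are authoritative).
type OrphanedResourcesMonitorSettings struct {
    // Warn indicates if warning condition should be created for apps which have orphaned resources
    Warn *bool `json:"warn,omitempty"`
}

// IsWarn is assumed to default to true when `warn` is omitted, so the warning
// condition is suppressed only when warn is explicitly set to false.
func (s *OrphanedResourcesMonitorSettings) IsWarn() bool {
    return s.Warn == nil || *s.Warn
}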
@@ -734,9 +734,8 @@ type resourceInfoProvider struct {
 // Infer if obj is namespaced or not from corresponding live objects list. If corresponding live object has namespace then target object is also namespaced.
 // If live object is missing then it does not matter if target is namespaced or not.
-func (p *resourceInfoProvider) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
-    key := kube.GetResourceKey(obj)
-    return p.namespacedByGk[key.GroupKind()], nil
+func (p *resourceInfoProvider) IsNamespaced(server string, gk schema.GroupKind) (bool, error) {
+    return p.namespacedByGk[gk], nil
 }

 func groupLocalObjs(localObs []*unstructured.Unstructured, liveObjs []*unstructured.Unstructured, appNamespace string) map[kube.ResourceKey]*unstructured.Unstructured {
...
@@ -45,6 +45,8 @@ import (
 const (
     updateOperationStateTimeout = 1 * time.Second
+    // orphanedIndex contains application which monitor orphaned resources by namespace
+    orphanedIndex = "orphaned"
 )

 type CompareWith int
@@ -123,14 +125,17 @@ func NewApplicationController(
         settingsMgr:     settingsMgr,
         selfHealTimeout: selfHealTimeout,
     }
-    appInformer, appLister := ctrl.newApplicationInformerAndLister()
+    appInformer, appLister, err := ctrl.newApplicationInformerAndLister()
+    if err != nil {
+        return nil, err
+    }
     projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, cache.Indexers{})
     metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
     ctrl.metricsServer = metrics.NewMetricsServer(metricsAddr, appLister, func() error {
         _, err := kubeClientset.Discovery().ServerVersion()
         return err
     })
-    stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectlCmd, ctrl.metricsServer, ctrl.handleAppUpdated)
+    stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectlCmd, ctrl.metricsServer, ctrl.handleObjectUpdated)
     appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectlCmd, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer)
     ctrl.appInformer = appInformer
     ctrl.appLister = appLister
@@ -150,23 +155,45 @@ func isSelfReferencedApp(app *appv1.Application, ref v1.ObjectReference) bool {
         gvk.Kind == application.ApplicationKind
 }

-func (ctrl *ApplicationController) handleAppUpdated(appName string, isManagedResource bool, ref v1.ObjectReference) {
-    skipForceRefresh := false
-
-    obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName)
-    if app, ok := obj.(*appv1.Application); exists && err == nil && ok && isSelfReferencedApp(app, ref) {
-        // Don't force refresh app if related resource is application itself. This prevents infinite reconciliation loop.
-        skipForceRefresh = true
-    }
-
-    if !skipForceRefresh {
-        level := ComparisonWithNothing
-        if isManagedResource {
-            level = CompareWithRecent
-        }
-        ctrl.requestAppRefresh(appName, level)
-    }
-    ctrl.appRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, appName))
+func (ctrl *ApplicationController) getAppProj(app *appv1.Application) (*appv1.AppProject, error) {
+    return argo.GetAppProject(&app.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace)
+}
+
+func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]bool, ref v1.ObjectReference) {
+    // if namespaced resource is not managed by any app it might be orphaned resource of some other apps
+    if len(managedByApp) == 0 && ref.Namespace != "" {
+        // retrieve applications which monitor orphaned resources in the same namespace and refresh them unless resource is blacklisted in app project
+        if objs, err := ctrl.appInformer.GetIndexer().ByIndex(orphanedIndex, ref.Namespace); err == nil {
+            for i := range objs {
+                app, ok := objs[i].(*appv1.Application)
+                if !ok {
+                    continue
+                }
+                // Ignore resource unless it is permitted in the app project. If project is not permitted then it is not controlled by the user and there is no point showing the warning.
+                if proj, err := ctrl.getAppProj(app); err == nil && proj.IsResourcePermitted(metav1.GroupKind{Group: ref.GroupVersionKind().Group, Kind: ref.Kind}, true) {
+                    managedByApp[app.Name] = false
+                }
+            }
+        }
+    }
+    for appName, isManagedResource := range managedByApp {
+        skipForceRefresh := false
+
+        obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName)
+        if app, ok := obj.(*appv1.Application); exists && err == nil && ok && isSelfReferencedApp(app, ref) {
+            // Don't force refresh app if related resource is application itself. This prevents infinite reconciliation loop.
+            skipForceRefresh = true
+        }
+
+        if !skipForceRefresh {
+            level := ComparisonWithNothing
+            if isManagedResource {
+                level = CompareWithRecent
+            }
+            ctrl.requestAppRefresh(appName, level)
+        }
+        ctrl.appRefreshQueue.Add(fmt.Sprintf("%s/%s", ctrl.namespace, appName))
+    }
 }

 func (ctrl *ApplicationController) setAppManagedResources(a *appv1.Application, comparisonResult *comparisonResult) (*appv1.ApplicationTree, error) {
@@ -188,8 +215,23 @@ func (ctrl *ApplicationController) setAppManagedResources(a *appv1.Application,
 func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managedResources []*appv1.ResourceDiff) (*appv1.ApplicationTree, error) {
     nodes := make([]appv1.ResourceNode, 0)
+
+    proj, err := argo.GetAppProject(&a.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace)
+    if err != nil {
+        return nil, err
+    }
+    orphanedNodesMap := make(map[kube.ResourceKey]appv1.ResourceNode)
+    warnOrphaned := true
+    if proj.Spec.OrphanedResources != nil {
+        orphanedNodesMap, err = ctrl.stateCache.GetNamespaceTopLevelResources(a.Spec.Destination.Server, a.Spec.Destination.Namespace)
+        if err != nil {
+            return nil, err
+        }
+        warnOrphaned = proj.Spec.OrphanedResources.IsWarn()
+    }
+
     for i := range managedResources {
         managedResource := managedResources[i]
+        delete(orphanedNodesMap, kube.NewResourceKey(managedResource.Group, managedResource.Kind, managedResource.Namespace, managedResource.Name))
         var live = &unstructured.Unstructured{}
         err := json.Unmarshal([]byte(managedResource.LiveState), &live)
         if err != nil {
@@ -212,16 +254,40 @@ func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managed
                 },
             })
         } else {
-            err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, live, func(child appv1.ResourceNode) {
+            err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) {
                 nodes = append(nodes, child)
             })
             if err != nil {
                 return nil, err
             }
         }
     }
-    return &appv1.ApplicationTree{Nodes: nodes}, nil
+
+    orphanedNodes := make([]appv1.ResourceNode, 0)
+    for k := range orphanedNodesMap {
+        if k.Namespace != "" && proj.IsResourcePermitted(metav1.GroupKind{Group: k.Group, Kind: k.Kind}, true) {
+            err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, k, func(child appv1.ResourceNode, appName string) {
+                belongToAnotherApp := false
+                if appName != "" {
+                    if _, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName); exists && err == nil {
+                        belongToAnotherApp = true
+                    }
+                }
+                if !belongToAnotherApp {
+                    orphanedNodes = append(orphanedNodes, child)
+                }
+            })
+            if err != nil {
+                return nil, err
+            }
+        }
+    }
+    if len(orphanedNodes) > 0 && warnOrphaned {
+        a.Status.SetConditions([]appv1.ApplicationCondition{{
+            Type:    appv1.ApplicationConditionOrphanedResourceWarning,
+            Message: fmt.Sprintf("Application has %d orphaned resources", len(orphanedNodes)),
+        }}, map[appv1.ApplicationConditionType]bool{appv1.ApplicationConditionOrphanedResourceWarning: true})
+    }
+    return &appv1.ApplicationTree{Nodes: nodes, OrphanedNodes: orphanedNodes}, nil
 }

 func (ctrl *ApplicationController) managedResources(comparisonResult *comparisonResult) ([]*appv1.ResourceDiff, error) {
@@ -672,11 +738,10 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
         }
     }

-    conditions, hasErrors := ctrl.refreshAppConditions(app)
-    if hasErrors {
+    ctrl.refreshAppConditions(app)
+    if len(app.Status.GetErrorConditions()) > 0 {
         app.Status.Sync.Status = appv1.SyncStatusCodeUnknown
         app.Status.Health.Status = appv1.HealthStatusUnknown
-        app.Status.Conditions = conditions
         ctrl.persistAppStatus(origApp, &app.Status)
         return
     }
@@ -692,10 +757,10 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
     }
     compareResult, err := ctrl.appStateManager.CompareAppState(app, revision, app.Spec.Source, refreshType == appv1.RefreshTypeHard, localManifests)
     if err != nil {
-        conditions = append(conditions, appv1.ApplicationCondition{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()})
+        app.Status.Conditions = append(app.Status.Conditions, appv1.ApplicationCondition{Type: appv1.ApplicationConditionComparisonError, Message: err.Error()})
     } else {
         ctrl.normalizeApplication(origApp, app, compareResult.appSourceType)
-        conditions = append(conditions, compareResult.conditions...)
+        app.Status.Conditions = append(app.Status.Conditions, compareResult.conditions...)
     }
     tree, err := ctrl.setAppManagedResources(app, compareResult)
     if err != nil {
@@ -706,7 +771,7 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
     syncErrCond := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
     if syncErrCond != nil {
-        conditions = append(conditions, *syncErrCond)
+        app.Status.Conditions = append(app.Status.Conditions, *syncErrCond)
     }

     app.Status.ObservedAt = &compareResult.reconciledAt
@@ -714,7 +779,6 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
     app.Status.Sync = *compareResult.syncStatus
     app.Status.Health = *compareResult.healthStatus
     app.Status.Resources = compareResult.resources
-    app.Status.Conditions = conditions
     app.Status.SourceType = compareResult.appSourceType
     ctrl.persistAppStatus(origApp, &app.Status)
     return
@@ -752,9 +816,9 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
     return false, refreshType, compareWith
 }

-func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) ([]appv1.ApplicationCondition, bool) {
+func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) {
     conditions := make([]appv1.ApplicationCondition, 0)
-    proj, err := argo.GetAppProject(&app.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace)
+    proj, err := ctrl.getAppProj(app)
     if err != nil {
         if apierr.IsNotFound(err) {
             conditions = append(conditions, appv1.ApplicationCondition{
@@ -778,9 +842,7 @@ func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application)
             conditions = append(conditions, specConditions...)
         }
     }
-
-    // List of condition types which have to be reevaluated by controller; all remaining conditions should stay as is.
-    reevaluateTypes := map[appv1.ApplicationConditionType]bool{
+    app.Status.SetConditions(conditions, map[appv1.ApplicationConditionType]bool{
         appv1.ApplicationConditionInvalidSpecError: true,
         appv1.ApplicationConditionUnknownError:     true,
         appv1.ApplicationConditionComparisonError:  true,
@@ -788,24 +850,7 @@
         appv1.ApplicationConditionSyncError:               true,
         appv1.ApplicationConditionRepeatedResourceWarning: true,
         appv1.ApplicationConditionExcludedResourceWarning: true,
-    }
-
-    appConditions := make([]appv1.ApplicationCondition, 0)
-    for i := 0; i < len(app.Status.Conditions); i++ {
-        condition := app.Status.Conditions[i]
-        if _, ok := reevaluateTypes[condition.Type]; !ok {
-            appConditions = append(appConditions, condition)
-        }
-    }
-    hasErrors := false
-    for i := range conditions {
-        condition := conditions[i]
-        appConditions = append(appConditions, condition)
-        if condition.IsError() {
-            hasErrors = true
-        }
-    }
-    return appConditions, hasErrors
+    })
 }

 // normalizeApplication normalizes an application.spec and additionally persists updates if it changed
@@ -976,7 +1021,7 @@ func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool,
     return retryAfter <= 0, retryAfter
 }

-func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) {
+func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister, error) {
     appInformerFactory := appinformers.NewFilteredSharedInformerFactory(
         ctrl.applicationClientset,
         ctrl.statusRefreshTimeout,
@@ -1020,7 +1065,24 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
         },
         },
     )
-    return informer, lister
+    err := informer.AddIndexers(cache.Indexers{
+        orphanedIndex: func(obj interface{}) (i []string, e error) {
+            app, ok := obj.(*appv1.Application)
+            if !ok {
+                return nil, nil
+            }
+            proj, err := ctrl.getAppProj(app)
+            if err != nil {
+                return nil, nil
+            }
+            if proj.Spec.OrphanedResources != nil {
+                return []string{app.Spec.Destination.Namespace}, nil
+            }
+            return nil, nil
+        },
+    })
+    return informer, lister, err
 }

 func isOperationInProgress(app *appv1.Application) bool {
...
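To summarize the controller changes above: when a watched object changes and no Application manages it, handleObjectUpdated fans the event out to every Application that monitors orphaned resources in that namespace (via the new orphanedIndex informer index), and getResourceTree builds the orphaned-node list by starting from all top-level resources in the destination namespace and filtering out managed, non-permitted, and foreign-app resources. The following is a self-contained sketch of that filtering order only, using hypothetical plain-data stand-ins (resourceKey, the topLevel/managed maps, permitted, appExists) rather than the controller's real types.

package main

import "fmt"

// resourceKey is a hypothetical stand-in for kube.ResourceKey.
type resourceKey struct{ Group, Kind, Namespace, Name string }

// orphanedNodes mirrors the flow in getResourceTree: start from every top-level
// resource in the destination namespace, then discard anything the app manages
// itself, anything the project does not permit, and anything owned by another app.
// topLevel maps each top-level resource to the name of the Application tracking it
// ("" when no app-instance label is present).
func orphanedNodes(topLevel map[resourceKey]string, managed map[resourceKey]bool,
    permitted func(group, kind string) bool, appExists func(name string) bool) []resourceKey {
    orphaned := make([]resourceKey, 0)
    for k, owner := range topLevel {
        // Skip resources the app manages itself, cluster-scoped resources,
        // and kinds the AppProject does not permit.
        if managed[k] || k.Namespace == "" || !permitted(k.Group, k.Kind) {
            continue
        }
        // A resource tracked by another existing Application is not orphaned.
        if owner != "" && appExists(owner) {
            continue
        }
        orphaned = append(orphaned, k)
    }
    return orphaned
}

func main() {
    topLevel := map[resourceKey]string{
        {Group: "apps", Kind: "Deployment", Namespace: "default", Name: "guestbook"}: "",
        {Group: "apps", Kind: "Deployment", Namespace: "default", Name: "stray"}:     "",
    }
    managed := map[resourceKey]bool{
        {Group: "apps", Kind: "Deployment", Namespace: "default", Name: "guestbook"}: true,
    }
    orphans := orphanedNodes(topLevel, managed,
        func(string, string) bool { return true }, // project permits everything
        func(string) bool { return false })        // no other Applications exist
    fmt.Println(orphans) // only the "stray" Deployment remains
}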
@@ -31,10 +31,16 @@ import (
     "github.com/argoproj/argo-cd/util/settings"
 )

+type namespacedResource struct {
+    argoappv1.ResourceNode
+    AppName string
+}
+
 type fakeData struct {
     apps                []runtime.Object
     manifestResponse    *apiclient.ManifestResponse
     managedLiveObjs     map[kube.ResourceKey]*unstructured.Unstructured
+    namespacedResources map[kube.ResourceKey]namespacedResource
 }

 func newFakeController(data *fakeData) *ApplicationController {
@@ -90,14 +96,25 @@ func newFakeController(data *fakeData) *ApplicationController {
     defer cancelProj()
     cancelApp := test.StartInformer(ctrl.appInformer)
     defer cancelApp()
-    // Mock out call to GetManagedLiveObjs if fake data supplied
-    if data.managedLiveObjs != nil {
-        mockStateCache := mockstatecache.LiveStateCache{}
-        mockStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(data.managedLiveObjs, nil)
-        mockStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil)
-        ctrl.stateCache = &mockStateCache
-        ctrl.appStateManager.(*appStateManager).liveStateCache = &mockStateCache
+    mockStateCache := mockstatecache.LiveStateCache{}
+    ctrl.appStateManager.(*appStateManager).liveStateCache = &mockStateCache
+    ctrl.stateCache = &mockStateCache
+    mockStateCache.On("IsNamespaced", mock.Anything, mock.Anything).Return(true, nil)
+    mockStateCache.On("GetManagedLiveObjs", mock.Anything, mock.Anything).Return(data.managedLiveObjs, nil)
+    response := make(map[kube.ResourceKey]argoappv1.ResourceNode)
+    for k, v := range data.namespacedResources {
+        response[k] = v.ResourceNode
     }
+    mockStateCache.On("GetNamespaceTopLevelResources", mock.Anything, mock.Anything).Return(response, nil)
+    mockStateCache.On("IterateHierarchy", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+        key := args[1].(kube.ResourceKey)
+        action := args[2].(func(child argoappv1.ResourceNode, appName string))
+        appName := ""
+        if res, ok := data.namespacedResources[key]; ok {
+            appName = res.AppName
+        }
+        action(argoappv1.ResourceNode{ResourceRef: argoappv1.ResourceRef{Group: key.Group, Namespace: key.Namespace, Name: key.Name}}, appName)
+    }).Return(nil)
     return ctrl
 }
@@ -464,17 +481,44 @@ func TestHandleAppUpdated(t *testing.T) {
     app.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr
     ctrl := newFakeController(&fakeData{apps: []runtime.Object{app}})

-    ctrl.handleAppUpdated(app.Name, true, kube.GetObjectRef(kube.MustToUnstructured(app)))
+    ctrl.handleObjectUpdated(map[string]bool{app.Name: true}, kube.GetObjectRef(kube.MustToUnstructured(app)))
     isRequested, level := ctrl.isRefreshRequested(app.Name)
     assert.False(t, isRequested)
     assert.Equal(t, ComparisonWithNothing, level)

-    ctrl.handleAppUpdated(app.Name, true, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: "default"})
+    ctrl.handleObjectUpdated(map[string]bool{app.Name: true}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: "default"})
     isRequested, level = ctrl.isRefreshRequested(app.Name)
     assert.True(t, isRequested)
     assert.Equal(t, CompareWithRecent, level)
 }

+func TestHandleOrphanedResourceUpdated(t *testing.T) {
+    app1 := newFakeApp()
+    app1.Name = "app1"
+    app1.Spec.Destination.Namespace = test.FakeArgoCDNamespace
+    app1.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr
+
+    app2 := newFakeApp()
+    app2.Name = "app2"
+    app2.Spec.Destination.Namespace = test.FakeArgoCDNamespace
+    app2.Spec.Destination.Server = common.KubernetesInternalAPIServerAddr
+
+    proj := defaultProj.DeepCopy()
+    proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
+
+    ctrl := newFakeController(&fakeData{apps: []runtime.Object{app1, app2, proj}})
+
+    ctrl.handleObjectUpdated(map[string]bool{}, corev1.ObjectReference{UID: "test", Kind: kube.DeploymentKind, Name: "test", Namespace: test.FakeArgoCDNamespace})
+
+    isRequested, level := ctrl.isRefreshRequested(app1.Name)
+    assert.True(t, isRequested)
+    assert.Equal(t, ComparisonWithNothing, level)
+
+    isRequested, level = ctrl.isRefreshRequested(app2.Name)
+    assert.True(t, isRequested)
+    assert.Equal(t, ComparisonWithNothing, level)
+}
+
 func TestSetOperationStateOnDeletedApp(t *testing.T) {
     ctrl := newFakeController(&fakeData{apps: []runtime.Object{}})
     fakeAppCs := ctrl.applicationClientset.(*appclientset.Clientset)
...
@@ -27,18 +27,20 @@ type cacheSettings struct {
 }

 type LiveStateCache interface {
-    IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error)
+    IsNamespaced(server string, gk schema.GroupKind) (bool, error)
     // Executes give callback against resource specified by the key and all its children
-    IterateHierarchy(server string, obj *unstructured.Unstructured, action func(child appv1.ResourceNode)) error
+    IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error
     // Returns state of live nodes which correspond for target nodes of specified application.
     GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
+    // Returns all top level resources (resources without owner references) of a specified namespace
+    GetNamespaceTopLevelResources(server string, namespace string) (map[kube.ResourceKey]appv1.ResourceNode, error)
     // Starts watching resources of each controlled cluster.
     Run(ctx context.Context) error
     // Invalidate invalidates the entire cluster state cache
     Invalidate()
 }

-type AppUpdatedHandler = func(appName string, isManagedResource bool, ref v1.ObjectReference)
+type ObjectUpdatedHandler = func(managedByApp map[string]bool, ref v1.ObjectReference)

 func GetTargetObjKey(a *appv1.Application, un *unstructured.Unstructured, isNamespaced bool) kube.ResourceKey {
     key := kube.GetResourceKey(un)
@@ -57,14 +59,14 @@ func NewLiveStateCache(
     settingsMgr *settings.SettingsManager,
     kubectl kube.Kubectl,
     metricsServer *metrics.MetricsServer,
-    onAppUpdated AppUpdatedHandler) LiveStateCache {
+    onObjectUpdated ObjectUpdatedHandler) LiveStateCache {

     return &liveStateCache{
         appInformer:     appInformer,
         db:              db,
         clusters:        make(map[string]*clusterInfo),
         lock:            &sync.Mutex{},
-        onAppUpdated:    onAppUpdated,
+        onObjectUpdated: onObjectUpdated,
         kubectl:         kubectl,
         settingsMgr:     settingsMgr,
         metricsServer:   metricsServer,
@@ -77,7 +79,7 @@ type liveStateCache struct {
     clusters        map[string]*clusterInfo
     lock            *sync.Mutex
     appInformer     cache.SharedIndexInformer
-    onAppUpdated    AppUpdatedHandler
+    onObjectUpdated ObjectUpdatedHandler
     kubectl         kube.Kubectl
     settingsMgr     *settings.SettingsManager
     metricsServer   *metrics.MetricsServer
@@ -115,7 +117,7 @@ func (c *liveStateCache) getCluster(server string) (*clusterInfo, error) {
         lock:            &sync.Mutex{},
         nodes:           make(map[kube.ResourceKey]*node),
         nsIndex:         make(map[string]map[kube.ResourceKey]*node),
-        onAppUpdated:    c.onAppUpdated,
+        onObjectUpdated: c.onObjectUpdated,
         kubectl:         c.kubectl,
         cluster:         cluster,
         syncTime:        nil,
@@ -153,23 +155,31 @@ func (c *liveStateCache) Invalidate() {
     log.Info("live state cache invalidated")
 }

-func (c *liveStateCache) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
+func (c *liveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool, error) {
     clusterInfo, err := c.getSyncedCluster(server)
     if err != nil {
         return false, err
     }
-    return clusterInfo.isNamespaced(obj), nil
+    return clusterInfo.isNamespaced(gk), nil
 }

-func (c *liveStateCache) IterateHierarchy(server string, obj *unstructured.Unstructured, action func(child appv1.ResourceNode)) error {
+func (c *liveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) error {
     clusterInfo, err := c.getSyncedCluster(server)
     if err != nil {
         return err
     }
-    clusterInfo.iterateHierarchy(obj, action)
+    clusterInfo.iterateHierarchy(key, action)
     return nil
 }

+func (c *liveStateCache) GetNamespaceTopLevelResources(server string, namespace string) (map[kube.ResourceKey]appv1.ResourceNode, error) {
+    clusterInfo, err := c.getSyncedCluster(server)
+    if err != nil {
+        return nil, err
+    }
+    return clusterInfo.getNamespaceTopLevelResources(namespace), nil
+}
+
 func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
     clusterInfo, err := c.getSyncedCluster(a.Spec.Destination.Server)
     if err != nil {
...
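A hypothetical caller of the extended LiveStateCache interface above, to illustrate how the two pieces fit together: GetNamespaceTopLevelResources yields the candidate orphans, and the extra appName argument of the IterateHierarchy callback reports which Application, if any, already tracks a node. The package name, import paths, and promoted Kind/Name fields on ResourceNode are assumptions for this sketch, not part of the change itself.

package orphanexample

import (
    "fmt"

    statecache "github.com/argoproj/argo-cd/controller/cache"
    appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

// dumpNamespaceTopLevel lists every top-level resource in a namespace and walks
// its hierarchy, printing which Application (if any) tracks each node.
func dumpNamespaceTopLevel(c statecache.LiveStateCache, server, namespace string) error {
    topLevel, err := c.GetNamespaceTopLevelResources(server, namespace)
    if err != nil {
        return err
    }
    for key := range topLevel {
        err = c.IterateHierarchy(server, key, func(child appv1.ResourceNode, appName string) {
            // appName is "" for nodes that no Application tracks.
            fmt.Printf("%s/%s (tracked by app %q)\n", child.Kind, child.Name, appName)
        })
        if err != nil {
            return err
        }
    }
    return nil
}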
@@ -48,7 +48,7 @@ type clusterInfo struct {
     nodes   map[kube.ResourceKey]*node
     nsIndex map[string]map[kube.ResourceKey]*node

-    onAppUpdated    AppUpdatedHandler
+    onObjectUpdated ObjectUpdatedHandler
     kubectl         kube.Kubectl
     cluster         *appv1.Cluster
     log             *log.Entry
@@ -85,6 +85,33 @@ func (c *clusterInfo) replaceResourceCache(gk schema.GroupKind, resourceVersion
     }
 }

+func isServiceAccountTokenSecret(un *unstructured.Unstructured) (bool, metav1.OwnerReference) {
+    ref := metav1.OwnerReference{
+        APIVersion: "v1",
+        Kind:       kube.ServiceAccountKind,
+    }
+    if un.GetKind() != kube.SecretKind || un.GroupVersionKind().Group != "" {
+        return false, ref
+    }
+
+    if typeVal, ok, err := unstructured.NestedString(un.Object, "type"); !ok || err != nil || typeVal != "kubernetes.io/service-account-token" {
+        return false, ref
+    }
+
+    annotations := un.GetAnnotations()
+    if annotations == nil {
+        return false, ref
+    }
+
+    id, okId := annotations["kubernetes.io/service-account.uid"]
+    name, okName := annotations["kubernetes.io/service-account.name"]
+    if okId && okName {
+        ref.Name = name
+        ref.UID = types.UID(id)
+    }
+    return ref.Name != "" && ref.UID != "", ref
+}
+
 func (c *clusterInfo) createObjInfo(un *unstructured.Unstructured, appInstanceLabel string) *node {
     ownerRefs := un.GetOwnerReferences()
     // Special case for endpoint. Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed
@@ -95,11 +122,18 @@ func (c *clusterInfo) createObjInfo(un *unstructured.Unstructured, appInstanceLa
             APIVersion: "v1",
         })
     }
+
+    // edge case. Consider auto-created service account tokens as a child of service account objects
+    if yes, ref := isServiceAccountTokenSecret(un); yes {
+        ownerRefs = append(ownerRefs, ref)
+    }
+
     nodeInfo := &node{
         resourceVersion: un.GetResourceVersion(),
         ref:             kube.GetObjectRef(un),
         ownerRefs:       ownerRefs,
     }
     populateNodeInfo(un, nodeInfo)
     appName := kube.GetAppInstanceLabel(un, appInstanceLabel)
     if len(ownerRefs) == 0 && appName != "" {
@@ -323,13 +357,24 @@ func (c *clusterInfo) ensureSynced() error {
     return c.syncError
 }

-func (c *clusterInfo) iterateHierarchy(obj *unstructured.Unstructured, action func(child appv1.ResourceNode)) {
+func (c *clusterInfo) getNamespaceTopLevelResources(namespace string) map[kube.ResourceKey]appv1.ResourceNode {
+    c.lock.Lock()
+    defer c.lock.Unlock()
+    nodes := make(map[kube.ResourceKey]appv1.ResourceNode)
+    for _, node := range c.nsIndex[namespace] {
+        if len(node.ownerRefs) == 0 {
+            nodes[node.resourceKey()] = node.asResourceNode()
+        }
+    }
+    return nodes
+}
+
+func (c *clusterInfo) iterateHierarchy(key kube.ResourceKey, action func(child appv1.ResourceNode, appName string)) {
     c.lock.Lock()
     defer c.lock.Unlock()
-    key := kube.GetResourceKey(obj)
     if objInfo, ok := c.nodes[key]; ok {
-        action(objInfo.asResourceNode())
         nsNodes := c.nsIndex[key.Namespace]
+        action(objInfo.asResourceNode(), objInfo.getApp(nsNodes))
         childrenByUID := make(map[types.UID][]*node)
         for _, child := range nsNodes {
             if objInfo.isParentOf(child) {
@@ -347,17 +392,15 @@ func (c *clusterInfo) iterateHierarchy(obj *unstructured.Unstructured, action fu
                 return strings.Compare(key1.String(), key2.String()) < 0
             })
             child := children[0]
-            action(child.asResourceNode())
+            action(child.asResourceNode(), child.getApp(nsNodes))
             child.iterateChildren(nsNodes, map[kube.ResourceKey]bool{objInfo.resourceKey(): true}, action)
         }
         }
-    } else {
-        action(c.createObjInfo(obj, c.cacheSettingsSrc().AppInstanceLabelKey).asResourceNode())
     }
 }

-func (c *clusterInfo) isNamespaced(obj *unstructured.Unstructured) bool {
-    if api, ok := c.apisMeta[kube.GetResourceKey(obj).GroupKind()]; ok && !api.namespaced {
+func (c *clusterInfo) isNamespaced(gk schema.GroupKind) bool {
+    if api, ok := c.apisMeta[gk]; ok && !api.namespaced {
         return false
     }
     return true
@@ -380,7 +423,7 @@ func (c *clusterInfo) getManagedLiveObjs(a *appv1.Application, targetObjs []*uns
     lock := &sync.Mutex{}
     err := util.RunAllAsync(len(targetObjs), func(i int) error {
         targetObj := targetObjs[i]
-        key := GetTargetObjKey(a, targetObj, c.isNamespaced(targetObj))
+        key := GetTargetObjKey(a, targetObj, c.isNamespaced(targetObj.GroupVersionKind().GroupKind()))
         lock.Lock()
         managedObj := managedObjs[key]
         lock.Unlock()
@@ -472,9 +515,7 @@ func (c *clusterInfo) onNodeUpdated(exists bool, existingNode *node, un *unstruc
             toNotify[app] = n.isRootAppNode() || toNotify[app]
         }
     }
-    for name, isRootAppNode := range toNotify {
-        c.onAppUpdated(name, isRootAppNode, newObj.ref)
-    }
+    c.onObjectUpdated(toNotify, newObj.ref)
 }

 func (c *clusterInfo) onNodeRemoved(key kube.ResourceKey, n *node) {
@@ -484,9 +525,11 @@ func (c *clusterInfo) onNodeRemoved(key kube.ResourceKey, n *node) {
         }
     }
     c.removeNode(key)
+    managedByApp := make(map[string]bool)
     if appName != "" {
-        c.onAppUpdated(appName, n.isRootAppNode(), n.ref)
+        managedByApp[appName] = n.isRootAppNode()
     }
+    c.onObjectUpdated(managedByApp, n.ref)
 }

 var (
...
@@ -153,16 +153,16 @@ func newCluster(objs ...*unstructured.Unstructured) *clusterInfo {
 func newClusterExt(kubectl kube.Kubectl) *clusterInfo {
     return &clusterInfo{
         lock:            &sync.Mutex{},
         nodes:           make(map[kube.ResourceKey]*node),
-        onAppUpdated:    func(appName string, fullRefresh bool, reference corev1.ObjectReference) {},
+        onObjectUpdated: func(managedByApp map[string]bool, reference corev1.ObjectReference) {},
         kubectl:         kubectl,
         nsIndex:         make(map[string]map[kube.ResourceKey]*node),
         cluster:         &appv1.Cluster{},
         syncTime:        nil,
         syncLock:        &sync.Mutex{},
         apisMeta:        make(map[schema.GroupKind]*apiMeta),
         log:             log.WithField("cluster", "test"),
         cacheSettingsSrc: func() *cacheSettings {
             return &cacheSettings{AppInstanceLabelKey: common.LabelKeyAppInstance}
         },
@@ -171,12 +171,43 @@ func newClusterExt(kubectl kube.Kubectl) *clusterInfo {
 func getChildren(cluster *clusterInfo, un *unstructured.Unstructured) []appv1.ResourceNode {
     hierarchy := make([]appv1.ResourceNode, 0)
-    cluster.iterateHierarchy(un, func(child appv1.ResourceNode) {
+    cluster.iterateHierarchy(kube.GetResourceKey(un), func(child appv1.ResourceNode, app string) {
         hierarchy = append(hierarchy, child)
     })
     return hierarchy[1:]
 }

+func TestGetNamespaceResources(t *testing.T) {
+    defaultNamespaceTopLevel1 := strToUnstructured(`
+  apiVersion: apps/v1
+  kind: Deployment
+  metadata: {"name": "helm-guestbook1", "namespace": "default"}
+`)
+    defaultNamespaceTopLevel2 := strToUnstructured(`
+  apiVersion: apps/v1
+  kind: Deployment
+  metadata: {"name": "helm-guestbook2", "namespace": "default"}
+`)
+    kubesystemNamespaceTopLevel2 := strToUnstructured(`
+  apiVersion: apps/v1
+  kind: Deployment
+  metadata: {"name": "helm-guestbook3", "namespace": "kube-system"}
+`)
+
+    cluster := newCluster(defaultNamespaceTopLevel1, defaultNamespaceTopLevel2, kubesystemNamespaceTopLevel2)
+    err := cluster.ensureSynced()
+    assert.Nil(t, err)
+
+    resources := cluster.getNamespaceTopLevelResources("default")
+    assert.Len(t, resources, 2)
+    assert.Equal(t, resources[kube.GetResourceKey(defaultNamespaceTopLevel1)].Name, "helm-guestbook1")
+    assert.Equal(t, resources[kube.GetResourceKey(defaultNamespaceTopLevel2)].Name, "helm-guestbook2")
+
+    resources = cluster.getNamespaceTopLevelResources("kube-system")
+    assert.Len(t, resources, 1)
+    assert.Equal(t, resources[kube.GetResourceKey(kubesystemNamespaceTopLevel2)].Name, "helm-guestbook3")
+}
+
 func TestGetChildren(t *testing.T) {
     cluster := newCluster(testPod, testRS, testDeploy)
     err := cluster.ensureSynced()
@@ -372,8 +403,10 @@ func TestUpdateResourceTags(t *testing.T) {
 func TestUpdateAppResource(t *testing.T) {
     updatesReceived := make([]string, 0)
     cluster := newCluster(testPod, testRS, testDeploy)
-    cluster.onAppUpdated = func(appName string, fullRefresh bool, _ corev1.ObjectReference) {
-        updatesReceived = append(updatesReceived, fmt.Sprintf("%s: %v", appName, fullRefresh))
+    cluster.onObjectUpdated = func(managedByApp map[string]bool, _ corev1.ObjectReference) {
+        for appName, fullRefresh := range managedByApp {
+            updatesReceived = append(updatesReceived, fmt.Sprintf("%s: %v", appName, fullRefresh))
+        }
     }
     err := cluster.ensureSynced()
...
@@ -5,6 +5,7 @@ package mocks
 import context "context"
 import kube "github.com/argoproj/argo-cd/util/kube"
 import mock "github.com/stretchr/testify/mock"
+import schema "k8s.io/apimachinery/pkg/runtime/schema"

 import unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 import v1alpha1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
@@ -36,25 +37,48 @@ func (_m *LiveStateCache) GetManagedLiveObjs(a *v1alpha1.Application, targetObjs
     return r0, r1
 }

+// GetNamespaceTopLevelResources provides a mock function with given fields: server, namespace
+func (_m *LiveStateCache) GetNamespaceTopLevelResources(server string, namespace string) (map[kube.ResourceKey]v1alpha1.ResourceNode, error) {
+    ret := _m.Called(server, namespace)
+
+    var r0 map[kube.ResourceKey]v1alpha1.ResourceNode
+    if rf, ok := ret.Get(0).(func(string, string) map[kube.ResourceKey]v1alpha1.ResourceNode); ok {
+        r0 = rf(server, namespace)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(map[kube.ResourceKey]v1alpha1.ResourceNode)
+        }
+    }
+
+    var r1 error
+    if rf, ok := ret.Get(1).(func(string, string) error); ok {
+        r1 = rf(server, namespace)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
 // Invalidate provides a mock function with given fields:
 func (_m *LiveStateCache) Invalidate() {
     _m.Called()
 }

-// IsNamespaced provides a mock function with given fields: server, obj
-func (_m *LiveStateCache) IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error) {
-    ret := _m.Called(server, obj)
+// IsNamespaced provides a mock function with given fields: server, gk
+func (_m *LiveStateCache) IsNamespaced(server string, gk schema.GroupKind) (bool, error) {
+    ret := _m.Called(server, gk)

     var r0 bool
-    if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured) bool); ok {
-        r0 = rf(server, obj)
+    if rf, ok := ret.Get(0).(func(string, schema.GroupKind) bool); ok {
+        r0 = rf(server, gk)
     } else {
         r0 = ret.Get(0).(bool)
     }

     var r1 error
-    if rf, ok := ret.Get(1).(func(string, *unstructured.Unstructured) error); ok {
-        r1 = rf(server, obj)
+    if rf, ok := ret.Get(1).(func(string, schema.GroupKind) error); ok {
+        r1 = rf(server, gk)
     } else {
         r1 = ret.Error(1)
     }
@@ -62,13 +86,13 @@ func (_m *LiveStateCache) IsNamespaced(server string, obj *unstructured.Unstruct
     return r0, r1
 }

-// IterateHierarchy provides a mock function with given fields: server, obj, action
-func (_m *LiveStateCache) IterateHierarchy(server string, obj *unstructured.Unstructured, action func(v1alpha1.ResourceNode)) error {
-    ret := _m.Called(server, obj, action)
+// IterateHierarchy provides a mock function with given fields: server, key, action
+func (_m *LiveStateCache) IterateHierarchy(server string, key kube.ResourceKey, action func(v1alpha1.ResourceNode, string)) error {
+    ret := _m.Called(server, key, action)

     var r0 error
-    if rf, ok := ret.Get(0).(func(string, *unstructured.Unstructured, func(v1alpha1.ResourceNode)) error); ok {
-        r0 = rf(server, obj, action)
+    if rf, ok := ret.Get(0).(func(string, kube.ResourceKey, func(v1alpha1.ResourceNode, string)) error); ok {
+        r0 = rf(server, key, action)
     } else {
         r0 = ret.Error(0)
     }
...
@@ -127,14 +127,14 @@ func (n *node) asResourceNode() appv1.ResourceNode {
     }
 }

-func (n *node) iterateChildren(ns map[kube.ResourceKey]*node, parents map[kube.ResourceKey]bool, action func(child appv1.ResourceNode)) {
+func (n *node) iterateChildren(ns map[kube.ResourceKey]*node, parents map[kube.ResourceKey]bool, action func(child appv1.ResourceNode, appName string)) {
     for childKey, child := range ns {
         if n.isParentOf(ns[childKey]) {
             if parents[childKey] {
                 key := n.resourceKey()
                 log.Warnf("Circular dependency detected. %s is child and parent of %s", childKey.String(), key.String())
             } else {
-                action(child.asResourceNode())
+                action(child.asResourceNode(), child.getApp(ns))
                 child.iterateChildren(ns, newResourceKeySet(parents, n.resourceKey()), action)
             }
         }
...
@@ -54,3 +54,30 @@ metadata:
     assert.Equal(t, parent.ref.UID, matchingNameEndPoint.ownerRefs[0].UID)
     assert.False(t, parent.isParentOf(nonMatchingNameEndPoint))
 }
+
+func TestIsServiceAccoountParentOfSecret(t *testing.T) {
+    serviceAccount := c.createObjInfo(strToUnstructured(`
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: default
+  namespace: default
+  uid: '123'
+secrets:
+- name: default-token-123
+`), "")
+    tokenSecret := c.createObjInfo(strToUnstructured(`
+apiVersion: v1
+kind: Secret
+metadata:
+  annotations:
+    kubernetes.io/service-account.name: default
+    kubernetes.io/service-account.uid: '123'
+  name: default-token-123
+  namespace: default
+  uid: '345'
+type: kubernetes.io/service-account-token
+`), "")
+
+    assert.True(t, serviceAccount.isParentOf(tokenSecret))
+}
@@ -9,6 +9,7 @@ import (
     log "github.com/sirupsen/logrus"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/tools/cache"
@@ -52,7 +53,7 @@ func GetLiveObjs(res []managedResource) []*unstructured.Unstructured {
 }

 type ResourceInfoProvider interface {
-    IsNamespaced(server string, obj *unstructured.Unstructured) (bool, error)
+    IsNamespaced(server string, gk schema.GroupKind) (bool, error)
 }

 // AppStateManager defines methods which allow to compare application spec and actual application state.
@@ -173,7 +174,7 @@ func DeduplicateTargetObjects(
     targetByKey := make(map[kubeutil.ResourceKey][]*unstructured.Unstructured)
     for i := range objs {
         obj := objs[i]
-        isNamespaced, err := infoProvider.IsNamespaced(server, obj)
+        isNamespaced, err := infoProvider.IsNamespaced(server, obj.GroupVersionKind().GroupKind())
         if err != nil {
             return objs, nil, err
         }
@@ -327,7 +328,7 @@ func (m *appStateManager) CompareAppState(app *v1alpha1.Application, revision st
     for i, obj := range targetObjs {
         gvk := obj.GroupVersionKind()
         ns := util.FirstNonEmpty(obj.GetNamespace(), app.Spec.Destination.Namespace)
-        if namespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, obj); err == nil && !namespaced {
+        if namespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, obj.GroupVersionKind().GroupKind()); err == nil && !namespaced {
             ns = ""
         }
         key := kubeutil.NewResourceKey(gvk.Group, gvk.Kind, ns, obj.GetName())
...
@@ -289,3 +289,55 @@ func TestSetHealthSelfReferencedApp(t *testing.T) {
	assert.Equal(t, compRes.healthStatus.Status, argoappv1.HealthStatusHealthy)
}

func TestSetManagedResourcesWithOrphanedResources(t *testing.T) {
	proj := defaultProj.DeepCopy()
	proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
	app := newFakeApp()
	ctrl := newFakeController(&fakeData{
		apps: []runtime.Object{app, proj},
		namespacedResources: map[kube.ResourceKey]namespacedResource{
			kube.NewResourceKey("apps", kube.DeploymentKind, app.Namespace, "guestbook"): {
				ResourceNode: argoappv1.ResourceNode{
					ResourceRef: argoappv1.ResourceRef{Kind: kube.DeploymentKind, Name: "guestbook", Namespace: app.Namespace},
				},
				AppName: "",
			},
		},
	})

	tree, err := ctrl.setAppManagedResources(app, &comparisonResult{managedResources: make([]managedResource, 0)})

	assert.NoError(t, err)
	assert.Equal(t, len(tree.OrphanedNodes), 1)
	assert.Equal(t, "guestbook", tree.OrphanedNodes[0].Name)
	assert.Equal(t, app.Namespace, tree.OrphanedNodes[0].Namespace)
}

func TestSetManagedResourcesWithResourcesOfAnotherApp(t *testing.T) {
	proj := defaultProj.DeepCopy()
	proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{}
	app1 := newFakeApp()
	app1.Name = "app1"
	app2 := newFakeApp()
	app2.Name = "app2"
	ctrl := newFakeController(&fakeData{
		apps: []runtime.Object{app1, app2, proj},
		namespacedResources: map[kube.ResourceKey]namespacedResource{
			kube.NewResourceKey("apps", kube.DeploymentKind, app2.Namespace, "guestbook"): {
				ResourceNode: argoappv1.ResourceNode{
					ResourceRef: argoappv1.ResourceRef{Kind: kube.DeploymentKind, Name: "guestbook", Namespace: app2.Namespace},
				},
				AppName: "app2",
			},
		},
	})

	tree, err := ctrl.setAppManagedResources(app1, &comparisonResult{managedResources: make([]managedResource, 0)})

	assert.NoError(t, err)
	assert.Equal(t, len(tree.OrphanedNodes), 0)
}
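Taken together, the two tests above pin down the selection rule: a resource from the app's namespace is reported as orphaned only when it is not attributed to any application, so resources owned by a different app are skipped. A rough sketch of that rule, reusing the fixture types from the tests; orphanedNodes is a hypothetical helper, not the controller's actual setAppManagedResources implementation.

// orphanedNodes returns the resource nodes that live in the app's namespace
// but are not attributed to any application.
func orphanedNodes(appNamespace string, resources map[kube.ResourceKey]namespacedResource) []argoappv1.ResourceNode {
	var orphaned []argoappv1.ResourceNode
	for key, res := range resources {
		if key.Namespace != appNamespace {
			continue // only resources in the app's own namespace are considered
		}
		if res.AppName != "" {
			continue // managed by some application (possibly another app), so not orphaned
		}
		orphaned = append(orphaned, res.ResourceNode)
	}
	return orphaned
}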
@@ -30,6 +30,10 @@ spec:
  - group: ''
    kind: NetworkPolicy

  # Enables namespace orphaned resource monitoring.
  orphanedResources:
    warn: false
  roles:
  # A role which provides read-only access to all applications in the project
  - name: read-only
......
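For completeness, the same setting expressed against the typed Go API, keeping the examples in Go like the rest of this change; enableOrphanedResourceMonitoring is a hypothetical helper, and Warn is assumed to be the *bool generated from the optional proto field shown further below.

package sketch

import (
	argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

// enableOrphanedResourceMonitoring mirrors the YAML above: it switches orphaned
// resource monitoring on for a project and records the warn preference.
func enableOrphanedResourceMonitoring(proj *argoappv1.AppProject, warn bool) {
	proj.Spec.OrphanedResources = &argoappv1.OrphanedResourcesMonitorSettings{Warn: &warn}
}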
@@ -430,6 +430,15 @@ spec:
            - kind
            type: object
          type: array
        orphanedResources:
          description: OrphanedResources specifies if controller should monitor
            orphaned resources of apps in this project
          properties:
            warn:
              description: Warn indicates if warning condition should be created
                for apps which have orphaned resources
              type: boolean
          type: object
        roles:
          description: Roles are user defined RBAC roles associated with this
            project
......
@@ -2179,6 +2179,15 @@ spec:
            - kind
            type: object
          type: array
        orphanedResources:
          description: OrphanedResources specifies if controller should monitor
            orphaned resources of apps in this project
          properties:
            warn:
              description: Warn indicates if warning condition should be created
                for apps which have orphaned resources
              type: boolean
          type: object
        roles:
          description: Roles are user defined RBAC roles associated with this
            project
......
@@ -2179,6 +2179,15 @@ spec:
            - kind
            type: object
          type: array
        orphanedResources:
          description: OrphanedResources specifies if controller should monitor
            orphaned resources of apps in this project
          properties:
            warn:
              description: Warn indicates if warning condition should be created
                for apps which have orphaned resources
              type: boolean
          type: object
        roles:
          description: Roles are user defined RBAC roles associated with this
            project
......
@@ -2179,6 +2179,15 @@ spec:
            - kind
            type: object
          type: array
        orphanedResources:
          description: OrphanedResources specifies if controller should monitor
            orphaned resources of apps in this project
          properties:
            warn:
              description: Warn indicates if warning condition should be created
                for apps which have orphaned resources
              type: boolean
          type: object
        roles:
          description: Roles are user defined RBAC roles associated with this
            project
......
@@ -2179,6 +2179,15 @@ spec:
            - kind
            type: object
          type: array
        orphanedResources:
          description: OrphanedResources specifies if controller should monitor
            orphaned resources of apps in this project
          properties:
            warn:
              description: Warn indicates if warning condition should be created
                for apps which have orphaned resources
              type: boolean
          type: object
        roles:
          description: Roles are user defined RBAC roles associated with this
            project
......
@@ -65,6 +65,9 @@ message AppProjectSpec {
	// NamespaceResourceBlacklist contains list of blacklisted namespace level resources
	repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind namespaceResourceBlacklist = 6;

	// OrphanedResources specifies if controller should monitor orphaned resources of apps in this project
	optional OrphanedResourcesMonitorSettings orphanedResources = 7;
}

// Application is a definition of Application resource.
@@ -248,7 +251,11 @@ message ApplicationSummary {
// ApplicationTree holds nodes which belong to the application
message ApplicationTree {
	// Nodes contains a list of nodes which are either directly managed by the application or are children of directly managed nodes.
	repeated ResourceNode nodes = 1;

	// OrphanedNodes contains orphaned nodes: nodes which are not managed by the app but are in the same namespace. The list is populated only if orphaned resources monitoring is enabled in the app project.
	repeated ResourceNode orphanedNodes = 2;
}

// ApplicationWatchEvent contains information about application change.
@@ -460,6 +467,12 @@ message OperationState {
	optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 7;
}

// OrphanedResourcesMonitorSettings holds settings of orphaned resources monitoring
message OrphanedResourcesMonitorSettings {
	// Warn indicates if warning condition should be created for apps which have orphaned resources
	optional bool warn = 1;
}

// ProjectRole represents a role that has access to a project
message ProjectRole {
	// Name is a name for this role
......
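A small sketch of consuming the new fields defined above: list an application's orphaned nodes and note whether the project asked for a warning condition. describeOrphans is illustrative only; the types come from the generated v1alpha1 Go package.

package sketch

import (
	"fmt"

	argoappv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
)

// describeOrphans summarizes the orphaned nodes of an application tree.
func describeOrphans(tree *argoappv1.ApplicationTree, settings *argoappv1.OrphanedResourcesMonitorSettings) []string {
	var out []string
	for _, node := range tree.OrphanedNodes {
		out = append(out, fmt.Sprintf("%s/%s in namespace %s is not managed by the app", node.Kind, node.Name, node.Namespace))
	}
	if settings != nil && settings.Warn != nil && *settings.Warn && len(out) > 0 {
		out = append(out, "project requests a warning condition for orphaned resources")
	}
	return out
}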