Unverified Commit d50d9c81 authored by baderbuddy's avatar baderbuddy Committed by GitHub
Browse files

Add the capability for controller level checks (#285)

* Add controller level checks

* Add check for multipleReplicas

* Fixed spec

* Add controller level check

* Move controller schema checks to their own function.
parent de98d9fb
Showing with 240 additions and 71 deletions
+240 -71
successMessage: Multiple replicas are scheduled
failureMessage: Only one replica is scheduled
category: Reliability
target: Controller
controllers:
include:
- Deployment
schema:
'$schema': http://json-schema.org/draft-07/schema
type: object
required:
- Object
properties:
Object:
type: object
required:
- spec
properties:
spec:
type: object
required:
- replicas
properties:
replicas:
type: integer
minimum: 2
checks:
# reliability
multipleReplicasForDeployment: warning
# resources
cpuRequestsMissing: warning
cpuLimitsMissing: warning
......
checks:
# reliability
multipleReplicasForDeployment: ignore
# resources
cpuRequestsMissing: warning
cpuLimitsMissing: warning
......
......@@ -17,6 +17,8 @@ const (
TargetContainer TargetKind = "Container"
// TargetPod points to the pod spec
TargetPod TargetKind = "Pod"
// TargetController points to the controller's spec
TargetController TargetKind = "Controller"
)
// SchemaCheck is a Polaris check that runs using JSON Schema
......@@ -128,6 +130,12 @@ func (check SchemaCheck) CheckPod(pod *corev1.PodSpec) (bool, error) {
return check.CheckObject(pod)
}
// CheckController checks a controller's spec against the schema.
// The input is the controller's original object serialized as JSON
// (see GenericWorkload.OriginalObjectJSON); it returns true when the
// object produces no schema validation errors, and any error raised
// by the validator itself.
func (check SchemaCheck) CheckController(objectJSON []byte) (bool, error) {
	// Renamed parameter: "bytes" shadowed the standard-library bytes package.
	errs, err := check.Schema.ValidateBytes(objectJSON)
	return len(errs) == 0, err
}
// CheckContainer checks a container spec against the schema
func (check SchemaCheck) CheckContainer(container *corev1.Container) (bool, error) {
return check.CheckObject(container)
......
......@@ -15,6 +15,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sYaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/dynamic"
......@@ -146,7 +147,13 @@ func CreateResourceProviderFromAPI(kube kubernetes.Interface, clusterName string
}
restMapper := restmapper.NewDiscoveryRESTMapper(resources)
objectCache := map[string]metav1.Object{}
objectCache := map[string]unstructured.Unstructured{}
controllers, err := LoadControllers(pods.Items, dynamic, &restMapper, objectCache)
if err != nil {
logrus.Errorf("Error loading controllers from pods: %v", err)
return nil, err
}
api := ResourceProvider{
ServerVersion: serverVersion.Major + "." + serverVersion.Minor,
......@@ -155,12 +162,12 @@ func CreateResourceProviderFromAPI(kube kubernetes.Interface, clusterName string
CreationTime: time.Now(),
Nodes: nodes.Items,
Namespaces: namespaces.Items,
Controllers: LoadControllers(pods.Items, dynamic, &restMapper, objectCache),
Controllers: controllers,
}
return &api, nil
}
func cacheAllObjectsOfKind(dynamicClient dynamic.Interface, groupVersionResource schema.GroupVersionResource, objectCache map[string]metav1.Object) error {
func cacheAllObjectsOfKind(dynamicClient dynamic.Interface, groupVersionResource schema.GroupVersionResource, objectCache map[string]unstructured.Unstructured) error {
objects, err := dynamicClient.Resource(groupVersionResource).Namespace("").List(metav1.ListOptions{})
if err != nil {
logrus.Warnf("Error retrieving parent object API %s and Kind %s because of error: %v ", groupVersionResource.Version, groupVersionResource.Resource, err)
......@@ -169,18 +176,13 @@ func cacheAllObjectsOfKind(dynamicClient dynamic.Interface, groupVersionResource
for idx, object := range objects.Items {
key := fmt.Sprintf("%s/%s/%s", object.GetKind(), object.GetNamespace(), object.GetName())
objMeta, err := meta.Accessor(&objects.Items[idx])
if err != nil {
logrus.Warnf("Error converting object to meta object %s %v", object.GetName(), err)
return err
}
objectCache[key] = objMeta
objectCache[key] = objects.Items[idx]
}
return nil
}
// LoadControllers loads a list of controllers from the kubeResources Pods
func LoadControllers(pods []corev1.Pod, dynamicClientPointer *dynamic.Interface, restMapperPointer *meta.RESTMapper, objectCache map[string]metav1.Object) []GenericWorkload {
func LoadControllers(pods []corev1.Pod, dynamicClientPointer *dynamic.Interface, restMapperPointer *meta.RESTMapper, objectCache map[string]unstructured.Unstructured) ([]GenericWorkload, error) {
interfaces := []GenericWorkload{}
deduped := map[string]corev1.Pod{}
for _, pod := range pods {
......@@ -192,9 +194,13 @@ func LoadControllers(pods []corev1.Pod, dynamicClientPointer *dynamic.Interface,
deduped[pod.ObjectMeta.Namespace+"/"+owners[0].Kind+"/"+owners[0].Name] = pod
}
for _, pod := range deduped {
interfaces = append(interfaces, NewGenericWorkload(pod, dynamicClientPointer, restMapperPointer, objectCache))
workload, err := NewGenericWorkload(pod, dynamicClientPointer, restMapperPointer, objectCache)
if err != nil {
return nil, err
}
interfaces = append(interfaces, workload)
}
return deduplicateControllers(interfaces)
return deduplicateControllers(interfaces), nil
}
// Because the controllers with an Owner take on the name of the Owner, this eliminates any duplicates.
......@@ -243,7 +249,14 @@ func addResourceFromString(contents string, resources *ResourceProvider) error {
} else if resource.Kind == "Pod" {
pod := corev1.Pod{}
err = decoder.Decode(&pod)
resources.Controllers = append(resources.Controllers, NewGenericWorkloadFromPod(pod))
if err != nil {
return err
}
workload, err := NewGenericWorkloadFromPod(pod, pod)
if err != nil {
return err
}
resources.Controllers = append(resources.Controllers, workload)
} else {
yamlNode := make(map[string]interface{})
err = yaml.Unmarshal(contentBytes, &yamlNode)
......@@ -264,7 +277,11 @@ func addResourceFromString(contents string, resources *ResourceProvider) error {
decoder := k8sYaml.NewYAMLOrJSONDecoder(bytes.NewReader(marshaledYaml), 1000)
pod := corev1.Pod{}
err = decoder.Decode(&pod)
newController := NewGenericWorkloadFromPod(pod)
newController, err := NewGenericWorkloadFromPod(pod, yamlNode)
if err != nil {
return err
}
newController.Kind = resource.Kind
resources.Controllers = append(resources.Controllers, newController)
}
......
package kube
import (
"encoding/json"
"errors"
"fmt"
"github.com/sirupsen/logrus"
kubeAPICoreV1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
kubeAPIMetaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
)
// GenericWorkload is a base implementation with some free methods for inherited structs
type GenericWorkload struct {
Kind string
PodSpec kubeAPICoreV1.PodSpec
ObjectMeta kubeAPIMetaV1.Object
Kind string
PodSpec kubeAPICoreV1.PodSpec
ObjectMeta kubeAPIMetaV1.Object
OriginalObjectJSON []byte
}
// NewGenericWorkloadFromPod builds a new workload for a given Pod without looking at parents
func NewGenericWorkloadFromPod(originalResource kubeAPICoreV1.Pod) GenericWorkload {
func NewGenericWorkloadFromPod(podResource kubeAPICoreV1.Pod, originalObject interface{}) (GenericWorkload, error) {
workload := GenericWorkload{}
workload.PodSpec = originalResource.Spec
workload.ObjectMeta = originalResource.ObjectMeta.GetObjectMeta()
workload.PodSpec = podResource.Spec
workload.ObjectMeta = podResource.ObjectMeta.GetObjectMeta()
workload.Kind = "Pod"
return workload
if originalObject != nil {
bytes, err := json.Marshal(originalObject)
if err != nil {
return workload, err
}
workload.OriginalObjectJSON = bytes
}
return workload, nil
}
// NewGenericWorkload builds a new workload for a given Pod
func NewGenericWorkload(originalResource kubeAPICoreV1.Pod, dynamicClientPointer *dynamic.Interface, restMapperPointer *meta.RESTMapper, objectCache map[string]kubeAPIMetaV1.Object) GenericWorkload {
workload := NewGenericWorkloadFromPod(originalResource)
func NewGenericWorkload(podResource kubeAPICoreV1.Pod, dynamicClientPointer *dynamic.Interface, restMapperPointer *meta.RESTMapper, objectCache map[string]unstructured.Unstructured) (GenericWorkload, error) {
workload, err := NewGenericWorkloadFromPod(podResource, nil)
if err != nil {
return workload, err
}
dynamicClient := *dynamicClientPointer
restMapper := *restMapperPointer
// If an owner exists then set the name to the workload.
// This allows us to handle CRDs creating Workloads or DeploymentConfigs in OpenShift.
owners := workload.ObjectMeta.GetOwnerReferences()
lastKey := ""
for len(owners) > 0 {
if len(owners) > 1 {
logrus.Warn("More than 1 owner found")
......@@ -46,10 +60,17 @@ func NewGenericWorkload(originalResource kubeAPICoreV1.Pod, dynamicClientPointer
}
workload.Kind = firstOwner.Kind
key := fmt.Sprintf("%s/%s/%s", firstOwner.Kind, workload.ObjectMeta.GetNamespace(), firstOwner.Name)
objMeta, ok := objectCache[key]
lastKey = key
abstractObject, ok := objectCache[key]
if ok {
objMeta, err := meta.Accessor(&abstractObject)
if err != nil {
logrus.Warnf("Error retrieving parent metadata %s of API %s and Kind %s because of error: %v ", firstOwner.Name, firstOwner.APIVersion, firstOwner.Kind, err)
return workload, err
}
workload.ObjectMeta = objMeta
owners = objMeta.GetOwnerReferences()
owners = abstractObject.GetOwnerReferences()
continue
}
......@@ -57,37 +78,44 @@ func NewGenericWorkload(originalResource kubeAPICoreV1.Pod, dynamicClientPointer
mapping, err := restMapper.RESTMapping(fqKind.GroupKind(), fqKind.Version)
if err != nil {
logrus.Warnf("Error retrieving mapping %s of API %s and Kind %s because of error: %v ", firstOwner.Name, firstOwner.APIVersion, firstOwner.Kind, err)
return workload
return workload, nil
}
err = cacheAllObjectsOfKind(dynamicClient, mapping.Resource, objectCache)
if err != nil {
logrus.Warnf("Error getting objects of Kind %s %v", firstOwner.Kind, err)
return workload
return workload, err
}
objMeta, ok = objectCache[key]
abstractObject, ok = objectCache[key]
if ok {
objMeta, err := meta.Accessor(&abstractObject)
if err != nil {
logrus.Warnf("Error retrieving parent metadata %s of API %s and Kind %s because of error: %v ", firstOwner.Name, firstOwner.APIVersion, firstOwner.Kind, err)
return workload, err
}
workload.ObjectMeta = objMeta
owners = objMeta.GetOwnerReferences()
owners = abstractObject.GetOwnerReferences()
continue
} else {
logrus.Errorf("Cache missed again %s", key)
return workload, errors.New("Could not retrieve parent object")
}
parent, err := dynamicClient.Resource(mapping.Resource).Namespace(workload.ObjectMeta.GetNamespace()).Get(firstOwner.Name, kubeAPIMetaV1.GetOptions{})
}
if lastKey != "" {
bytes, err := json.Marshal(objectCache[lastKey])
if err != nil {
logrus.Warnf("Error retrieving parent object %s of API %s and Kind %s because of error: %v ", firstOwner.Name, firstOwner.APIVersion, firstOwner.Kind, err)
return workload
return workload, err
}
objMeta, err = meta.Accessor(parent)
workload.OriginalObjectJSON = bytes
} else {
bytes, err := json.Marshal(podResource)
if err != nil {
logrus.Warnf("Error retrieving parent metadata %s of API %s and Kind %s because of error: %v ", firstOwner.Name, firstOwner.APIVersion, firstOwner.Kind, err)
return workload
return workload, err
}
workload.ObjectMeta = objMeta
objectCache[key] = objMeta
owners = parent.GetOwnerReferences()
workload.OriginalObjectJSON = bytes
}
return workload
return workload, nil
}
......@@ -50,17 +50,18 @@ exemptions:
- foo
`
func getEmptyWorkload(name string) kube.GenericWorkload {
workload := kube.NewGenericWorkloadFromPod(corev1.Pod{
func getEmptyWorkload(t *testing.T, name string) kube.GenericWorkload {
workload, err := kube.NewGenericWorkloadFromPod(corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
})
}, nil)
assert.NoError(t, err)
return workload
}
func testValidate(t *testing.T, container *corev1.Container, resourceConf *string, controllerName string, expectedErrors []ResultMessage, expectedWarnings []ResultMessage, expectedSuccesses []ResultMessage) {
testValidateWithWorkload(t, container, resourceConf, getEmptyWorkload(controllerName), expectedErrors, expectedWarnings, expectedSuccesses)
testValidateWithWorkload(t, container, resourceConf, getEmptyWorkload(t, controllerName), expectedErrors, expectedWarnings, expectedSuccesses)
}
func testValidateWithWorkload(t *testing.T, container *corev1.Container, resourceConf *string, workload kube.GenericWorkload, expectedErrors []ResultMessage, expectedWarnings []ResultMessage, expectedSuccesses []ResultMessage) {
......@@ -88,7 +89,7 @@ func TestValidateResourcesEmptyConfig(t *testing.T) {
Name: "Empty",
}
results, err := applyContainerSchemaChecks(&conf.Configuration{}, getEmptyWorkload(""), container, false)
results, err := applyContainerSchemaChecks(&conf.Configuration{}, getEmptyWorkload(t, ""), container, false)
if err != nil {
panic(err)
}
......@@ -184,7 +185,7 @@ func TestValidateHealthChecks(t *testing.T) {
for idx, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
controller := getEmptyWorkload("")
controller := getEmptyWorkload(t, "")
results, err := applyContainerSchemaChecks(&conf.Configuration{Checks: tt.probes}, controller, tt.container, tt.isInit)
if err != nil {
panic(err)
......@@ -298,7 +299,7 @@ func TestValidateImage(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
controller := getEmptyWorkload("")
controller := getEmptyWorkload(t, "")
results, err := applyContainerSchemaChecks(&conf.Configuration{Checks: tt.image}, controller, tt.container, false)
if err != nil {
panic(err)
......@@ -415,7 +416,7 @@ func TestValidateNetworking(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
controller := getEmptyWorkload("")
controller := getEmptyWorkload(t, "")
results, err := applyContainerSchemaChecks(&conf.Configuration{Checks: tt.networkConf}, controller, tt.container, false)
if err != nil {
panic(err)
......@@ -919,7 +920,8 @@ func TestValidateSecurity(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
workload := kube.NewGenericWorkloadFromPod(corev1.Pod{Spec: *tt.pod})
workload, err := kube.NewGenericWorkloadFromPod(corev1.Pod{Spec: *tt.pod}, nil)
assert.NoError(t, err)
results, err := applyContainerSchemaChecks(&conf.Configuration{Checks: tt.securityConf}, workload, tt.container, false)
if err != nil {
panic(err)
......@@ -1063,7 +1065,8 @@ func TestValidateRunAsRoot(t *testing.T) {
}
for idx, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
workload := kube.NewGenericWorkloadFromPod(corev1.Pod{Spec: *tt.pod})
workload, err := kube.NewGenericWorkloadFromPod(corev1.Pod{Spec: *tt.pod}, nil)
assert.NoError(t, err)
results, err := applyContainerSchemaChecks(&config, workload, tt.container, false)
if err != nil {
panic(err)
......@@ -1164,7 +1167,7 @@ func TestValidateResourcesEmptyContainerCPURequestsExempt(t *testing.T) {
expectedSuccesses := []ResultMessage{}
workload := kube.NewGenericWorkloadFromPod(corev1.Pod{
workload, err := kube.NewGenericWorkloadFromPod(corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
......@@ -1172,6 +1175,7 @@ func TestValidateResourcesEmptyContainerCPURequestsExempt(t *testing.T) {
"polaris.fairwinds.com/memoryRequestsMissing-exempt": "truthy", // Don't actually exempt this controller from memoryRequestsMissing
},
},
})
}, nil)
assert.NoError(t, err)
testValidateWithWorkload(t, &container, &resourceConfMinimal, workload, expectedErrors, expectedWarnings, expectedSuccesses)
}
......@@ -31,11 +31,17 @@ func ValidateController(conf *conf.Configuration, controller kube.GenericWorkloa
if err != nil {
return ControllerResult{}, err
}
controllerResult, err := applyControllerSchemaChecks(conf, controller)
if err != nil {
return ControllerResult{}, err
}
result := ControllerResult{
Kind: controller.Kind,
Name: controller.ObjectMeta.GetName(),
Namespace: controller.ObjectMeta.GetNamespace(),
Results: ResultSet{},
Results: controllerResult,
PodResult: podResult,
}
......
......@@ -32,7 +32,8 @@ func TestValidateController(t *testing.T) {
"hostPIDSet": conf.SeverityError,
},
}
deployment := kube.NewGenericWorkloadFromPod(test.MockPod())
deployment, err := kube.NewGenericWorkloadFromPod(test.MockPod(), nil)
assert.NoError(t, err)
deployment.Kind = "Deployment"
expectedSum := CountSummary{
Successes: uint(2),
......@@ -56,6 +57,44 @@ func TestValidateController(t *testing.T) {
assert.EqualValues(t, expectedResults, actualResult.PodResult.Results)
}
// TestControllerLevelChecks verifies that a controller-targeted check
// (multipleReplicasForDeployment) is evaluated against the Deployment's
// own spec, producing an error for the single-replica test fixture.
func TestControllerLevelChecks(t *testing.T) {
	checkConf := conf.Configuration{
		Checks: map[string]conf.Severity{
			"multipleReplicasForDeployment": conf.SeverityError,
		},
	}

	resources, err := kube.CreateResourceProviderFromPath("../kube/test_files/test_1")
	assert.Equal(t, nil, err, "Error should be nil")
	assert.Equal(t, 8, len(resources.Controllers), "Should have eight controllers")

	wantSummary := CountSummary{
		Successes: uint(0),
		Warnings:  uint(0),
		Errors:    uint(1),
	}
	wantResults := ResultSet{
		"multipleReplicasForDeployment": {ID: "multipleReplicasForDeployment", Message: "Only one replica is scheduled", Success: false, Severity: "error", Category: "Reliability"},
	}

	for _, controller := range resources.Controllers {
		// Only the single-replica Deployment fixture is under test here.
		if controller.Kind != "Deployment" || controller.ObjectMeta.GetName() != "test-deployment" {
			continue
		}
		actualResult, err := ValidateController(&checkConf, controller)
		if err != nil {
			panic(err)
		}
		assert.Equal(t, "Deployment", actualResult.Kind)
		assert.Equal(t, 1, len(actualResult.Results), "should be equal")
		assert.EqualValues(t, wantSummary, actualResult.GetSummary())
		assert.EqualValues(t, wantResults, actualResult.Results)
	}
}
func TestSkipHealthChecks(t *testing.T) {
c := conf.Configuration{
Checks: map[string]conf.Severity{
......@@ -65,7 +104,8 @@ func TestSkipHealthChecks(t *testing.T) {
}
pod := test.MockPod()
pod.Spec.InitContainers = []corev1.Container{test.MockContainer("test")}
deployment := kube.NewGenericWorkloadFromPod(pod)
deployment, err := kube.NewGenericWorkloadFromPod(pod, nil)
assert.NoError(t, err)
deployment.Kind = "Deployment"
expectedSum := CountSummary{
Successes: uint(0),
......@@ -86,7 +126,8 @@ func TestSkipHealthChecks(t *testing.T) {
assert.EqualValues(t, ResultSet{}, actualResult.PodResult.ContainerResults[0].Results)
assert.EqualValues(t, expectedResults, actualResult.PodResult.ContainerResults[1].Results)
job := kube.NewGenericWorkloadFromPod(test.MockPod())
job, err := kube.NewGenericWorkloadFromPod(test.MockPod(), nil)
assert.NoError(t, err)
job.Kind = "Job"
expectedSum = CountSummary{
Successes: uint(0),
......@@ -103,7 +144,8 @@ func TestSkipHealthChecks(t *testing.T) {
assert.EqualValues(t, expectedSum, actualResult.GetSummary())
assert.EqualValues(t, expectedResults, actualResult.PodResult.ContainerResults[0].Results)
cronjob := kube.NewGenericWorkloadFromPod(test.MockPod())
cronjob, err := kube.NewGenericWorkloadFromPod(test.MockPod(), nil)
assert.NoError(t, err)
cronjob.Kind = "CronJob"
expectedSum = CountSummary{
Successes: uint(0),
......@@ -129,7 +171,8 @@ func TestControllerExemptions(t *testing.T) {
},
}
pod := test.MockPod()
workload := kube.NewGenericWorkloadFromPod(pod)
workload, err := kube.NewGenericWorkloadFromPod(pod, nil)
assert.NoError(t, err)
workload.Kind = "Deployment"
resources := &kube.ResourceProvider{
Controllers: []kube.GenericWorkload{workload},
......
......@@ -38,8 +38,8 @@ func TestValidatePod(t *testing.T) {
k8s, _ := test.SetupTestAPI()
k8s = test.SetupAddControllers(k8s, "test")
p := test.MockPod()
deployment := kube.NewGenericWorkloadFromPod(p)
deployment, err := kube.NewGenericWorkloadFromPod(p, nil)
assert.NoError(t, err)
expectedSum := CountSummary{
Successes: uint(4),
Warnings: uint(0),
......@@ -76,8 +76,8 @@ func TestInvalidIPCPod(t *testing.T) {
k8s = test.SetupAddControllers(k8s, "test")
p := test.MockPod()
p.Spec.HostIPC = true
workload := kube.NewGenericWorkloadFromPod(p)
workload, err := kube.NewGenericWorkloadFromPod(p, nil)
assert.NoError(t, err)
expectedSum := CountSummary{
Successes: uint(3),
Warnings: uint(0),
......@@ -113,8 +113,8 @@ func TestInvalidNeworkPod(t *testing.T) {
k8s = test.SetupAddControllers(k8s, "test")
p := test.MockPod()
p.Spec.HostNetwork = true
workload := kube.NewGenericWorkloadFromPod(p)
workload, err := kube.NewGenericWorkloadFromPod(p, nil)
assert.NoError(t, err)
expectedSum := CountSummary{
Successes: uint(3),
Warnings: uint(1),
......@@ -151,8 +151,8 @@ func TestInvalidPIDPod(t *testing.T) {
k8s = test.SetupAddControllers(k8s, "test")
p := test.MockPod()
p.Spec.HostPID = true
workload := kube.NewGenericWorkloadFromPod(p)
workload, err := kube.NewGenericWorkloadFromPod(p, nil)
assert.NoError(t, err)
expectedSum := CountSummary{
Successes: uint(3),
Warnings: uint(0),
......@@ -198,8 +198,8 @@ func TestExemption(t *testing.T) {
p.ObjectMeta = metav1.ObjectMeta{
Name: "foo",
}
workload := kube.NewGenericWorkloadFromPod(p)
workload, err := kube.NewGenericWorkloadFromPod(p, nil)
assert.NoError(t, err)
expectedSum := CountSummary{
Successes: uint(3),
Warnings: uint(0),
......
......@@ -21,6 +21,8 @@ var (
// We explicitly set the order to avoid thrash in the
// tests as we migrate toward JSON schema
checkOrder = []string{
// Controller Checks
"multipleReplicasForDeployment",
// Pod checks
"hostIPCSet",
"hostPIDSet",
......@@ -135,6 +137,31 @@ func applyPodSchemaChecks(conf *config.Configuration, controller kube.GenericWor
return results, nil
}
// applyControllerSchemaChecks runs each configured check that targets the
// Controller level against the workload's original JSON representation.
// Checks exempted via the controller's annotations are skipped, and the
// outcome of every executed check is collected into the returned ResultSet.
func applyControllerSchemaChecks(conf *config.Configuration, controller kube.GenericWorkload) (ResultSet, error) {
	annotations := controller.ObjectMeta.GetAnnotations()
	results := ResultSet{}
	for _, checkID := range getSortedKeys(conf.Checks) {
		// An annotation like polaris.fairwinds.com/<check>-exempt: "true"
		// opts this controller out of the check.
		if strings.ToLower(annotations[getExemptKey(checkID)]) == "true" {
			continue
		}
		check, err := resolveCheck(conf, checkID, controller, config.TargetController, false)
		if err != nil {
			return nil, err
		}
		if check == nil {
			continue
		}
		passes, err := check.CheckController(controller.OriginalObjectJSON)
		if err != nil {
			return nil, err
		}
		results[check.ID] = makeResult(conf, check, passes)
	}
	return results, nil
}
func applyContainerSchemaChecks(conf *config.Configuration, controller kube.GenericWorkload, container *corev1.Container, isInit bool) (ResultSet, error) {
results := ResultSet{}
checkIDs := getSortedKeys(conf.Checks)
......
......@@ -138,7 +138,7 @@ func TestValidateResourcesPartiallyValid(t *testing.T) {
func TestValidateResourcesInit(t *testing.T) {
emptyContainer := &corev1.Container{}
controller := getEmptyWorkload("")
controller := getEmptyWorkload(t, "")
parsedConf, err := conf.Parse([]byte(resourceConfRanges))
assert.NoError(t, err, "Expected no error when parsing config")
......
......@@ -83,11 +83,13 @@ func NewWebhook(name string, mgr manager.Manager, validator Validator, apiType r
func (v *Validator) handleInternal(ctx context.Context, req types.Request) (*validator.PodResult, error) {
pod := corev1.Pod{}
var originalObject interface{}
if req.AdmissionRequest.Kind.Kind == "Pod" {
err := v.decoder.Decode(req, &pod)
if err != nil {
return nil, err
}
originalObject = pod
} else {
decoded := map[string]interface{}{}
err := json.Unmarshal(req.AdmissionRequest.Object.Raw, &decoded)
......@@ -103,8 +105,12 @@ func (v *Validator) handleInternal(ctx context.Context, req types.Request) (*val
if err != nil {
return nil, err
}
originalObject = decoded
}
controller, err := kube.NewGenericWorkloadFromPod(pod, originalObject)
if err != nil {
return nil, err
}
controller := kube.NewGenericWorkloadFromPod(pod)
controller.Kind = req.AdmissionRequest.Kind.Kind
controllerResult, err := validator.ValidateController(&v.Config, controller)
if err != nil {
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment