# production-best-practices-policy
## Based on https://github.com/learnk8s/kubernetes-production-best-practices
This example adds the following custom rules, recommended for production environments:
* CUSTOM_CONTAINERS_PODS_MISSING_OWNERS
* CUSTOM_CONTAINERS_MISSING_LIVENESSPROBE
* CUSTOM_CONTAINERS_MISSING_READINESSPROBE
* CUSTOM_CONTAINERS_MISSING_IMAGE_TAG
* CUSTOM_CONTAINERS_MIN_REPLICAS
* CUSTOM_CONTAINERS_MISSING_PODANTIAFFINITY
* CUSTOM_CONTAINERS_RESOURCES_REQUESTS_AND_LIMITS
* CUSTOM_CONTAINERS_RESOURCES_REQUESTS_CPU_BELOW_1000M
* CUSTOM_CONTAINERS_TECHNICAL_LABELS
* CUSTOM_CONTAINERS_BUSINESS_LABELS
* CUSTOM_CONTAINERS_SECURITY_LABELS
* CUSTOM_CONTAINERS_RESTRICT_ALPHA_BETA
### Policy type: Containers
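To try the policy, publish it to your Datree account and run the CLI against the example manifests below. A minimal session, assuming the files are saved as `policies.yaml`, `fail-examples.yaml`, and `pass-examples.yaml` (file names here are illustrative): run `datree publish policies.yaml`, then `datree test fail-examples.yaml` and `datree test pass-examples.yaml`. Flag names for selecting a non-default policy vary across CLI versions, so check `datree test --help`.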
The following manifests intentionally violate one or more of these rules:

```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: fail-environment-label
  labels:
    environment: qa
    app: test
spec:
  containers:
    - name: nginx
      image: nginx:1.14.2
      ports:
        - containerPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: test-deploy
  name: test-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-deploy
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: test-deploy
    spec:
      containers:
        - image: nginx
          name: nginx
          resources: {}
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: web
  labels:
    app: nginx
    app.kubernetes.io/name: test-deploy
    app.kubernetes.io/instance: test-deploy-5fa65d2
    confidentiality: official
    compliance: pci
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
        app.kubernetes.io/name: test-deploy
        app.kubernetes.io/instance: test-deploy-5fa65d2
        confidentiality: official
        compliance: pci
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - store
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: nginx
          image: k8s.gcr.io/nginx-slim:0.8
          ports:
            - containerPort: 80
              name: web
          readinessProbe:
            httpGet:
              path: /readinessprobe
              port: 8080
            initialDelaySeconds: 3
            periodSeconds: 3
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "my-storage-class"
        resources:
          requests:
            storage: 1Gi
```
And these manifests satisfy every rule in the policy:

```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pass-policy
  labels:
    app.kubernetes.io/name: pass-policy
    app.kubernetes.io/instance: pass-policy-5fa65d2
    app.kubernetes.io/version: "42"
    app.kubernetes.io/component: api
    app.kubernetes.io/part-of: payment-gateway
    app.kubernetes.io/managed-by: helm
    owner: payment-team
    project: fraud-detection
    business-unit: "80432"
    confidentiality: official
    compliance: pci
  ownerReferences:
    # (name and uid, required by the Kubernetes API, are omitted in this lint-only example)
    - apiVersion: apps/v1
      blockOwnerDeletion: true
      controller: true
      kind: ReplicaSet
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: security
                operator: In
                values:
                  - S1
          topologyKey: topology.kubernetes.io/zone
  containers:
    - name: nginx
      image: nginx:1.14.2
      ports:
        - containerPort: 8080
      livenessProbe:
        httpGet:
          path: /healthz
          port: 8080
        initialDelaySeconds: 3
        periodSeconds: 3
      readinessProbe:
        httpGet:
          path: /readinessprobe
          port: 8080
        initialDelaySeconds: 3
        periodSeconds: 3
      resources:
        limits:
          cpu: 500m
          memory: 4Gi
        requests:
          cpu: 200m
          memory: 2Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  name: test-deploy
  labels:
    app: test-deploy
    app.kubernetes.io/name: test-deploy
    app.kubernetes.io/instance: test-deploy-5fa65d2
    app.kubernetes.io/version: "42"
    app.kubernetes.io/component: api
    app.kubernetes.io/part-of: test-deploy
    app.kubernetes.io/managed-by: kubectl
    owner: payment-team
    project: fraud-detection
    business-unit: "80432"
    confidentiality: official
    compliance: pci
spec:
  replicas: 2
  selector:
    matchLabels:
      app: test-deploy
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: test-deploy
        app.kubernetes.io/name: test-deploy
        app.kubernetes.io/instance: test-deploy-5fa65d2
        app.kubernetes.io/version: "42"
        app.kubernetes.io/component: api
        app.kubernetes.io/part-of: test-deploy
        app.kubernetes.io/managed-by: kubectl
        owner: payment-team
        project: fraud-detection
        business-unit: "80432"
        confidentiality: official
        compliance: pci
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - store
              topologyKey: "kubernetes.io/hostname"
      containers:
        - image: nginx:1.14.2
          name: nginx
          resources:
            limits:
              cpu: 2
              memory: 4Gi
            requests:
              cpu: 500m
              memory: 2Gi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 3
            periodSeconds: 3
          readinessProbe:
            httpGet:
              path: /readinessprobe
              port: 8080
            initialDelaySeconds: 3
            periodSeconds: 3
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
  labels:
    app: nginx
    app.kubernetes.io/name: test-deploy
    app.kubernetes.io/instance: test-deploy-5fa65d2
    app.kubernetes.io/version: "42"
    app.kubernetes.io/component: api
    app.kubernetes.io/part-of: test-deploy
    app.kubernetes.io/managed-by: kubectl
    owner: payment-team
    project: fraud-detection
    business-unit: "80432"
    confidentiality: official
    compliance: pci
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
        app.kubernetes.io/name: test-deploy
        app.kubernetes.io/instance: test-deploy-5fa65d2
        app.kubernetes.io/version: "42"
        app.kubernetes.io/component: api
        app.kubernetes.io/part-of: test-deploy
        app.kubernetes.io/managed-by: kubectl
        owner: payment-team
        project: fraud-detection
        business-unit: "80432"
        confidentiality: official
        compliance: pci
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - store
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: nginx
          image: k8s.gcr.io/nginx-slim:0.8
          ports:
            - containerPort: 80
              name: web
          resources:
            limits:
              cpu: 2
              memory: 4Gi
            requests:
              cpu: 1
              memory: 2Gi
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
            initialDelaySeconds: 3
            periodSeconds: 3
          readinessProbe:
            httpGet:
              path: /readinessprobe
              port: 8080
            initialDelaySeconds: 3
            periodSeconds: 3
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: "my-storage-class"
        resources:
          requests:
            storage: 1Gi
```
The policy configuration registers every rule in a single default policy; the `customRules` section below carries the JSON Schema for each identifier:

```yaml
apiVersion: v1
policies:
  - name: production_best_practices
    isDefault: true
    rules:
      - identifier: CUSTOM_CONTAINERS_PODS_MISSING_OWNERS
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_MISSING_LIVENESSPROBE
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_MISSING_READINESSPROBE
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_MISSING_IMAGE_TAG
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_MIN_REPLICAS
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_MISSING_PODANTIAFFINITY
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_RESOURCES_REQUESTS_AND_LIMITS
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_RESOURCES_REQUESTS_CPU_BELOW_1000M
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_TECHNICAL_LABELS
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_BUSINESS_LABELS
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_SECURITY_LABELS
        messageOnFailure: ''
      - identifier: CUSTOM_CONTAINERS_RESTRICT_ALPHA_BETA
        messageOnFailure: ''
customRules:
  ## METADATA.OWNERREFERENCES == REQUIRED
  - identifier: CUSTOM_CONTAINERS_PODS_MISSING_OWNERS
    name: Ensure each Pod has an owning ReplicaSet, StatefulSet or DaemonSet [CUSTOM RULE]
    defaultMessageOnFailure: Delete stand-alone Pod
    schema:
      if:
        properties:
          kind:
            enum:
              - Pod
      then:
        properties:
          metadata:
            properties:
              ownerReferences:
                items:
                  properties:
                    kind:
                      enum:
                        - ReplicaSet
                        - StatefulSet
                        - DaemonSet
            required:
              - ownerReferences
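  ## Note: the if/then construct applies the then-schema only to resources whose
  ## kind matches the enum; resources of any other kind pass the rule automatically.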
  ## SPEC.ITEMS.LIVENESSPROBE == REQUIRED
  - identifier: CUSTOM_CONTAINERS_MISSING_LIVENESSPROBE
    name: Ensure each container has a configured liveness probe [CUSTOM RULE]
    defaultMessageOnFailure: Add liveness probe
    schema:
      definitions:
        specContainers:
          properties:
            spec:
              properties:
                containers:
                  items:
                    required:
                      - livenessProbe
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
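  ## Note: the specContainers definition is applied through allOf, while
  ## additionalProperties and items recursively re-apply the whole schema
  ## ($ref: '#') to every nested object and array. This is what lets the
  ## containers check reach pod templates embedded in Deployments,
  ## StatefulSets, DaemonSets, etc., not just bare Pods.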
  ## SPEC.ITEMS.READINESSPROBE == REQUIRED
  - identifier: CUSTOM_CONTAINERS_MISSING_READINESSPROBE
    name: Ensure each container has a configured readiness probe [CUSTOM RULE]
    defaultMessageOnFailure: Add readinessProbe
    schema:
      definitions:
        specContainers:
          properties:
            spec:
              properties:
                containers:
                  items:
                    required:
                      - readinessProbe
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
  ## SPEC.ITEMS.IMAGE.TAG != LATEST|EMPTY
  - identifier: CUSTOM_CONTAINERS_MISSING_IMAGE_TAG
    name: Ensure each container image has a pinned (tag) version [CUSTOM RULE]
    defaultMessageOnFailure: Set image version
    schema:
      definitions:
        specContainers:
          properties:
            spec:
              properties:
                containers:
                  type: array
                  items:
                    properties:
                      image:
                        type: string
                        pattern: ^(?=.*[:@](?=.+)(?!latest)).*$
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
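  ## Examples against the image pattern above:
  ##   nginx:1.14.2      -> passes (pinned tag)
  ##   nginx@sha256:...  -> passes (pinned digest)
  ##   nginx             -> fails  (no tag)
  ##   nginx:            -> fails  (empty tag)
  ##   nginx:latest      -> fails  (floating tag)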
  ## 2 <= SPEC.REPLICAS <= 10
  - identifier: CUSTOM_CONTAINERS_MIN_REPLICAS
    name: Ensure Deployment or StatefulSet has replicas set between 2 and 10 [CUSTOM RULE]
    defaultMessageOnFailure: Running 2 or more replicas increases the availability of the service
    schema:
      if:
        properties:
          kind:
            enum:
              - Deployment
              - StatefulSet
      then:
        properties:
          spec:
            properties:
              replicas:
                minimum: 2
                maximum: 10
            required:
              - replicas
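  ## Note: because replicas is required, a Deployment or StatefulSet that omits
  ## the field (and therefore defaults to 1 replica) also fails this rule.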
  ## SPEC.TEMPLATE.SPEC.AFFINITY.PODANTIAFFINITY == REQUIRED
  - identifier: CUSTOM_CONTAINERS_MISSING_PODANTIAFFINITY
    name: Ensure each Deployment or StatefulSet has podAntiAffinity configured [CUSTOM RULE]
    defaultMessageOnFailure: Apply anti-affinity rules to your Deployments and StatefulSets so that Pods are spread across the nodes of your cluster.
    schema:
      if:
        properties:
          kind:
            enum:
              - Deployment
              - StatefulSet
      then:
        properties:
          spec:
            properties:
              template:
                properties:
                  spec:
                    properties:
                      affinity:
                        required:
                          - podAntiAffinity
                    required:
                      - affinity
  ## SPEC.CONTAINERS.RESOURCES [REQUESTS, LIMITS]
  - identifier: CUSTOM_CONTAINERS_RESOURCES_REQUESTS_AND_LIMITS
    name: Ensure each container has configured requests and limits resources [CUSTOM RULE]
    defaultMessageOnFailure: Without requests and limits, an unlimited number of Pods is schedulable on any node, leading to resource overcommitment and potential node (and kubelet) crashes.
    schema:
      definitions:
        specContainers:
          properties:
            spec:
              properties:
                containers:
                  items:
                    properties:
                      resources:
                        required:
                          - requests
                          - limits
                    required:
                      - resources
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
  ## SPEC.CONTAINERS.RESOURCES.REQUESTS.CPU <= 1000m
  - identifier: CUSTOM_CONTAINERS_RESOURCES_REQUESTS_CPU_BELOW_1000M
    name: Ensure each container has a configured CPU request <= 1000m [CUSTOM RULE]
    defaultMessageOnFailure: Unless you have computationally intensive jobs, it is recommended to set the CPU request to 1 CPU or below.
    schema:
      definitions:
        specContainers:
          properties:
            spec:
              properties:
                containers:
                  items:
                    properties:
                      resources:
                        properties:
                          requests:
                            properties:
                              cpu:
                                resourceMaximum: 1000m
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
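  ## Note: resourceMaximum is a Datree-specific extension to JSON Schema that
  ## compares Kubernetes resource quantities, so a request of 500m passes and
  ## 1 (= 1000m) is the largest value this rule accepts.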
  ## *.METADATA.LABELS == REQUIRED ALL [name, instance, version, component, part-of, managed-by]
  - identifier: CUSTOM_CONTAINERS_TECHNICAL_LABELS
    name: Ensure each workload has the technical labels defined [CUSTOM RULE]
    defaultMessageOnFailure: The labels [name, instance, version, component, part-of, managed-by] are recommended by the official Kubernetes documentation.
    schema:
      definitions:
        specContainers:
          properties:
            metadata:
              properties:
                labels:
                  required:
                    - app.kubernetes.io/name
                    - app.kubernetes.io/instance
                    - app.kubernetes.io/version
                    - app.kubernetes.io/component
                    - app.kubernetes.io/part-of
                    - app.kubernetes.io/managed-by
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
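  ## See https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
  ## for the recommended app.kubernetes.io/* labels checked above.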
  ## *.METADATA.LABELS == REQUIRED ALL [owner, project, business-unit]
  - identifier: CUSTOM_CONTAINERS_BUSINESS_LABELS
    name: Ensure each workload has business labels defined [CUSTOM RULE]
    defaultMessageOnFailure: You can explore labels and tagging for resources on the AWS tagging strategy page.
    schema:
      definitions:
        specContainers:
          properties:
            metadata:
              properties:
                labels:
                  required:
                    - owner
                    - project
                    - business-unit
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
  ## *.METADATA.LABELS == REQUIRED ALL [confidentiality, compliance]
  - identifier: CUSTOM_CONTAINERS_SECURITY_LABELS
    name: Ensure each workload has security labels defined [CUSTOM RULE]
    defaultMessageOnFailure: You can explore labels and tagging for resources on the AWS tagging strategy page.
    schema:
      definitions:
        specContainers:
          properties:
            metadata:
              properties:
                labels:
                  required:
                    - confidentiality
                    - compliance
      allOf:
        - $ref: '#/definitions/specContainers'
      additionalProperties:
        $ref: '#'
      items:
        $ref: '#'
  ## APIVERSION != [*alpha*, *beta*]
  - identifier: CUSTOM_CONTAINERS_RESTRICT_ALPHA_BETA
    name: Ensure no alpha or beta API versions are in use [CUSTOM RULE]
    defaultMessageOnFailure: Alpha and beta Kubernetes features are in active development and may have limitations or bugs that result in security vulnerabilities.
    schema:
      properties:
        apiVersion:
          type: string
          pattern: ^((?!alpha|beta).)*$
```
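The negative lookahead rejects any `apiVersion` containing the substring `alpha` or `beta`, so stable group versions pass while pre-release ones fail. A few illustrative values:

```yaml
apiVersion: v1                   # passes
apiVersion: apps/v1              # passes
apiVersion: apps/v1beta1         # fails
apiVersion: autoscaling/v2beta2  # fails
```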