Unverified commit 02188744, authored by k8s-ci-robot, committed by GitHub

Merge pull request #4422 from justinsb/cherrypicks_for_181

Cherrypicks for 1.8.1
Showing 197 additions and 76 deletions (+197 -76)
......@@ -56,7 +56,8 @@ type KopeioNetworkingSpec struct {
// WeaveNetworkingSpec declares that we want Weave networking
type WeaveNetworkingSpec struct {
MTU *int32 `json:"mtu,omitempty"`
+ConnLimit *int32 `json:"connLimit,omitempty"`
}
// FlannelNetworkingSpec declares that we want Flannel networking
......
......@@ -56,7 +56,8 @@ type KopeioNetworkingSpec struct {
// WeaveNetworkingSpec declares that we want Weave networking
type WeaveNetworkingSpec struct {
MTU *int32 `json:"mtu,omitempty"`
+ConnLimit *int32 `json:"connLimit,omitempty"`
}
// FlannelNetworkingSpec declares that we want Flannel networking
......
......@@ -56,7 +56,8 @@ type KopeioNetworkingSpec struct {
// WeaveNetworkingSpec declares that we want Weave networking
type WeaveNetworkingSpec struct {
MTU *int32 `json:"mtu,omitempty"`
+ConnLimit *int32 `json:"connLimit,omitempty"`
}
// FlannelNetworkingSpec declares that we want Flannel networking
......
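The hunks above add the same ConnLimit field to the Weave networking spec in each API package, and it surfaces in the cluster spec under spec.networking.weave. A minimal sketch of how a user might set it (field names come from the json tags above; the values are purely illustrative):

spec:
  networking:
    weave:
      mtu: 8912
      connLimit: 200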
......@@ -114,16 +114,21 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
}
}
-if kubernetesVersion.Major == 1 && kubernetesVersion.Minor <= 5 {
-clusterSpec.Kubelet.APIServers = "https://" + clusterSpec.MasterInternalName
-clusterSpec.MasterKubelet.APIServers = "http://127.0.0.1:8080"
-} else if kubernetesVersion.Major == 1 { // for 1.6+ use kubeconfig instead of api-servers
+if b.Context.IsKubernetesGTE("1.6") {
+// for 1.6+ use kubeconfig instead of api-servers
const kubeconfigPath = "/var/lib/kubelet/kubeconfig"
clusterSpec.Kubelet.KubeconfigPath = kubeconfigPath
-clusterSpec.Kubelet.RequireKubeconfig = fi.Bool(true)
clusterSpec.MasterKubelet.KubeconfigPath = kubeconfigPath
-clusterSpec.MasterKubelet.RequireKubeconfig = fi.Bool(true)
+// Only pass require-kubeconfig to versions prior to 1.9; deprecated & being removed
+if b.Context.IsKubernetesLT("1.9") {
+clusterSpec.Kubelet.RequireKubeconfig = fi.Bool(true)
+clusterSpec.MasterKubelet.RequireKubeconfig = fi.Bool(true)
+}
+} else {
+// Legacy behaviour for <= 1.5
+clusterSpec.Kubelet.APIServers = "https://" + clusterSpec.MasterInternalName
+clusterSpec.MasterKubelet.APIServers = "http://127.0.0.1:8080"
}
// IsolateMasters enables the legacy behaviour, where master pods on a separate network
......
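The net effect of the new version gating, sketched as the kubelet settings kops would end up rendering into the completed cluster spec (field names assume kops' usual camelCase json tags for KubeletConfigSpec; this is illustrative, not the exact builder output):

# Kubernetes 1.6 through 1.8: require-kubeconfig is still passed
kubelet:
  kubeconfigPath: /var/lib/kubelet/kubeconfig
  requireKubeconfig: true
---
# Kubernetes 1.9 and later: the deprecated flag is omitted
kubelet:
  kubeconfigPath: /var/lib/kubelet/kubeconfig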
......@@ -28,7 +28,7 @@ spec:
memory: 100Mi
securityContext:
privileged: true
-image: kopeio/networking-agent:1.0.20171015
+image: kopeio/networking-agent:1.0.20180203
name: networking-agent
volumeMounts:
- name: lib-modules
......
......@@ -28,7 +28,7 @@ spec:
memory: 100Mi
securityContext:
privileged: true
-image: kopeio/networking-agent:1.0.20171015
+image: kopeio/networking-agent:1.0.20180203
name: networking-agent
volumeMounts:
- name: lib-modules
......
......@@ -171,7 +171,7 @@ spec:
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
-image: quay.io/coreos/flannel:v0.9.1
+image: quay.io/coreos/flannel:v0.9.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
......
......@@ -3,7 +3,7 @@
# This manifest includes the following component versions:
# calico/node:v2.6.2
# calico/cni:v1.11.0
-# coreos/flannel:v0.9.1
+# coreos/flannel:v0.9.0 (bug with v0.9.1: https://github.com/kubernetes/kops/issues/4037)
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
......@@ -25,25 +25,35 @@ data:
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.1.0",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
......@@ -176,6 +186,8 @@ spec:
image: quay.io/calico/cni:v1.11.0
command: ["/install-cni.sh"]
env:
+- name: CNI_CONF_NAME
+value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
......@@ -194,7 +206,7 @@ spec:
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
-image: quay.io/coreos/flannel:v0.9.1
+image: quay.io/coreos/flannel:v0.9.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
......
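Of note in the Canal change above: the CNI config is now a 0.3.0 conflist that chains the portmap plugin after calico, which is what allows hostPort declarations on Canal-managed pods to be wired up. A minimal, hypothetical pod exercising it:

apiVersion: v1
kind: Pod
metadata:
  name: hostport-example
spec:
  containers:
  - name: web
    image: nginx
    ports:
    - containerPort: 80
      hostPort: 8080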
......@@ -130,7 +130,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
-image: quay.io/calico/node:v2.6.2
+image: quay.io/calico/node:v2.6.6
resources:
requests:
cpu: 10m
......@@ -185,7 +185,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
-image: quay.io/calico/cni:v1.11.0
+image: quay.io/calico/cni:v1.11.2
resources:
requests:
cpu: 10m
......@@ -262,7 +262,8 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
-image: quay.io/calico/kube-controllers:v1.0.0
+# This shouldn't get updated, since this is the last version we shipped that should be used.
+image: quay.io/calico/kube-policy-controller:v0.7.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
......@@ -316,7 +317,7 @@ spec:
operator: Exists
containers:
- name: calico-kube-controllers
-image: quay.io/calico/kube-controllers:v1.0.0
+image: quay.io/calico/kube-controllers:v1.0.3
resources:
requests:
cpu: 10m
......@@ -411,7 +412,7 @@ spec:
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
-- image: ottoyiu/k8s-ec2-srcdst:v0.1.0
+- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
......
......@@ -141,7 +141,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
-image: quay.io/calico/node:v2.6.2
+image: quay.io/calico/node:v2.6.6
resources:
requests:
cpu: 10m
......@@ -196,7 +196,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
-image: quay.io/calico/cni:v1.11.0
+image: quay.io/calico/cni:v1.11.2
resources:
requests:
cpu: 10m
......@@ -282,7 +282,7 @@ spec:
operator: Exists
containers:
- name: calico-kube-controllers
-image: quay.io/calico/kube-controllers:v1.0.0
+image: quay.io/calico/kube-controllers:v1.0.3
resources:
requests:
cpu: 10m
......@@ -321,7 +321,8 @@ spec:
serviceAccountName: calico
containers:
- name: calico-policy-controller
-image: quay.io/calico/kube-controllers:v1.0.0
+# This shouldn't get updated, since this is the last version we shipped that should be used.
+image: quay.io/calico/kube-policy-controller:v0.7.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
......@@ -421,7 +422,7 @@ spec:
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
-- image: ottoyiu/k8s-ec2-srcdst:v0.1.0
+- image: ottoyiu/k8s-ec2-srcdst:v0.2.1
name: k8s-ec2-srcdst
resources:
requests:
......
......@@ -237,7 +237,7 @@ spec:
spec:
hostNetwork: true
containers:
-- image: ottoyiu/k8s-ec2-srcdst:v0.1.0
+- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
......
......@@ -137,7 +137,7 @@ spec:
effect: NoSchedule
containers:
- name: romana-daemon
-image: quay.io/romana/daemon:v2.0.0
+image: quay.io/romana/daemon:v2.0.2
imagePullPolicy: Always
resources:
requests:
......@@ -170,7 +170,7 @@ spec:
effect: NoSchedule
containers:
- name: romana-listener
-image: quay.io/romana/listener:v2.0.0
+image: quay.io/romana/listener:v2.0.2
imagePullPolicy: Always
resources:
requests:
......@@ -202,7 +202,7 @@ spec:
effect: NoSchedule
containers:
- name: romana-agent
-image: quay.io/romana/agent:v2.0.0
+image: quay.io/romana/agent:v2.0.2
imagePullPolicy: Always
resources:
requests:
......@@ -305,7 +305,7 @@ spec:
effect: NoSchedule
containers:
- name: romana-aws
-image: quay.io/romana/aws:v2.0.0
+image: quay.io/romana/aws:v2.0.2
imagePullPolicy: Always
resources:
requests:
......@@ -334,7 +334,7 @@ spec:
effect: NoSchedule
containers:
- name: romana-vpcrouter
-image: quay.io/romana/vpcrouter-romana-plugin:1.1.12
+image: quay.io/romana/vpcrouter-romana-plugin:1.1.17
imagePullPolicy: Always
resources:
requests:
......
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+name: weave-net
+namespace: kube-system
+labels:
+name: weave-net
+---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
......@@ -25,24 +34,56 @@ rules:
- list
- watch
---
-apiVersion: v1
-kind: ServiceAccount
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
+kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
-kind: ClusterRole
+kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
......@@ -54,10 +95,10 @@ apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
-namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
+namespace: kube-system
spec:
template:
metadata:
......@@ -83,7 +124,11 @@ spec:
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
-image: 'weaveworks/weave-kube:2.0.5'
+{{- if .Networking.Weave.ConnLimit }}
+- name: CONN_LIMIT
+value: "{{ .Networking.Weave.ConnLimit }}"
+{{- end }}
+image: 'weaveworks/weave-kube:2.2.0'
livenessProbe:
httpGet:
host: 127.0.0.1
......@@ -112,13 +157,15 @@ spec:
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
+args:
+- '--use-legacy-netpol'
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
-image: 'weaveworks/weave-npc:2.0.5'
+image: 'weaveworks/weave-npc:2.2.0'
resources:
requests:
cpu: 50m
......
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+name: weave-net
+namespace: kube-system
+labels:
+name: weave-net
+---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
......@@ -17,7 +26,7 @@ rules:
- list
- watch
- apiGroups:
-- extensions
+- 'networking.k8s.io'
resources:
- networkpolicies
verbs:
......@@ -25,24 +34,56 @@ rules:
- list
- watch
---
-apiVersion: v1
-kind: ServiceAccount
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
+kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
-kind: ClusterRole
+kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
......@@ -54,10 +95,10 @@ apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
-namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
+namespace: kube-system
spec:
template:
metadata:
......@@ -83,7 +124,11 @@ spec:
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
-image: 'weaveworks/weave-kube:2.0.5'
+{{- if .Networking.Weave.ConnLimit }}
+- name: CONN_LIMIT
+value: "{{ .Networking.Weave.ConnLimit }}"
+{{- end }}
+image: 'weaveworks/weave-kube:2.2.0'
livenessProbe:
httpGet:
host: 127.0.0.1
......@@ -114,13 +159,14 @@ spec:
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-npc
+args: []
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
-image: 'weaveworks/weave-npc:2.0.5'
+image: 'weaveworks/weave-npc:2.2.0'
resources:
requests:
cpu: 50m
......
......@@ -42,7 +42,11 @@ spec:
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
-image: 'weaveworks/weave-kube:2.0.5'
+{{- if .Networking.Weave.ConnLimit }}
+- name: CONN_LIMIT
+value: "{{ .Networking.Weave.ConnLimit }}"
+{{- end }}
+image: 'weaveworks/weave-kube:2.2.0'
livenessProbe:
httpGet:
host: 127.0.0.1
......@@ -71,13 +75,15 @@ spec:
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
+args:
+- '--use-legacy-netpol'
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
-image: 'weaveworks/weave-npc:2.0.5'
+image: 'weaveworks/weave-npc:2.2.0'
resources:
requests:
cpu: 50m
......
......@@ -353,7 +353,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
if b.cluster.Spec.Networking.Kopeio != nil {
key := "networking.kope.io"
version := "1.0.20171015"
version := "1.0.20180203"
{
location := key + "/pre-k8s-1.6.yaml"
......@@ -388,8 +388,8 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
if b.cluster.Spec.Networking.Weave != nil {
key := "networking.weave"
-// 2.0.6-kops.1 = 2.0.5 with kops manifest tweaks. This should go away with the next version bump.
-version := "2.0.6-kops.1"
+// 2.2.0-kops.1 = 2.2.0, kops packaging version 1.
+version := "2.2.0-kops.1"
{
location := key + "/pre-k8s-1.6.yaml"
......@@ -603,7 +603,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
if b.cluster.Spec.Networking.Romana != nil {
key := "networking.romana"
version := "v2.0.0"
version := "v2.0.2"
{
location := key + "/k8s-1.7.yaml"
......
......@@ -69,11 +69,11 @@ spec:
name: networking.kope.io
selector:
role.kubernetes.io/networking: "1"
-version: 1.0.20171015
+version: 1.0.20180203
- id: k8s-1.6
kubernetesVersion: '>=1.6.0'
manifest: networking.kope.io/k8s-1.6.yaml
name: networking.kope.io
selector:
role.kubernetes.io/networking: "1"
-version: 1.0.20171015
+version: 1.0.20180203
......@@ -69,18 +69,18 @@ spec:
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
-version: 2.0.6-kops.1
+version: 2.2.0-kops.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.7.0'
manifest: networking.weave/k8s-1.6.yaml
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
-version: 2.0.6-kops.1
+version: 2.2.0-kops.1
- id: k8s-1.7
kubernetesVersion: '>=1.7.0'
manifest: networking.weave/k8s-1.7.yaml
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
-version: 2.0.6-kops.1
+version: 2.2.0-kops.1