Commit 866db995 authored by MengxinLiu, committed by oilbeater

merge images into one

parent 232251bb
Showing with 43 additions and 225 deletions
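With the six role-specific images (node, controller, cni, db, webhook, pinger) folded into a single kube-ovn image, the build, push, and kind targets below each collapse from a per-role loop to one command. A minimal sketch of the resulting workflow, assuming the Makefile variables in this diff (REGISTRY, DEV_TAG, RELEASE_TAG) are left at their defaults:

    # build every component binary, then the one dev-tagged image
    make build-dev-images
    # push a single image instead of one per role
    make push-dev
    # for local testing: build the release-tagged image and preload it into kind
    make release
    make kind-init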
@@ -2,21 +2,16 @@ GOFILES_NOVENDOR=$(shell find . -type f -name '*.go' -not -path "./vendor/*")
GO_VERSION=1.13
REGISTRY=index.alauda.cn/alaudak8s
ROLES=node controller cni db webhook pinger
DEV_TAG=dev
RELEASE_TAG=$(shell cat VERSION)
.PHONY: build-dev-images build-go build-bin test lint up down halt suspend resume kind push-dev push-release
build-dev-images: build-bin
@for role in ${ROLES} ; do \
docker build -t ${REGISTRY}/kube-ovn-$$role:${DEV_TAG} -f dist/images/Dockerfile.$$role dist/images/; \
done
docker build -t ${REGISTRY}/kube-ovn:${DEV_TAG} -f dist/images/Dockerfile dist/images/
push-dev:
@for role in ${ROLES} ; do \
docker push ${REGISTRY}/kube-ovn-$$role:${DEV_TAG}; \
done
docker push ${REGISTRY}/kube-ovn:${DEV_TAG}
build-go:
CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/dist/images/kube-ovn -ldflags "-w -s" -v ./cmd/cni
@@ -26,14 +21,10 @@ build-go:
CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/dist/images/kube-ovn-pinger -ldflags "-w -s" -v ./cmd/pinger
release: lint build-go
@for role in ${ROLES} ; do \
docker build -t ${REGISTRY}/kube-ovn-$$role:${RELEASE_TAG} -f dist/images/Dockerfile.$$role dist/images/; \
done
docker build -t ${REGISTRY}/kube-ovn:${RELEASE_TAG} -f dist/images/Dockerfile dist/images/
push-release:
@for role in ${ROLES} ; do \
docker push ${REGISTRY}/kube-ovn-$$role:${RELEASE_TAG}; \
done
docker push ${REGISTRY}/kube-ovn:${RELEASE_TAG}
lint:
@gofmt -d ${GOFILES_NOVENDOR}
@@ -70,9 +61,7 @@ suspend:
kind-init:
kind delete cluster --name=kube-ovn
kind create cluster --config yamls/kind.yaml --name kube-ovn
@for role in ${ROLES} ; do \
kind load docker-image --name kube-ovn ${REGISTRY}/kube-ovn-$$role:${RELEASE_TAG}; \
done
kind load docker-image --name kube-ovn ${REGISTRY}/kube-ovn:${RELEASE_TAG}
kubectl label node kube-ovn-control-plane kube-ovn/role=master
kubectl apply -f yamls/crd.yaml
kubectl apply -f yamls/ovn.yaml
@@ -81,18 +70,14 @@ kind-init:
kind-init-ha:
kind delete cluster --name=kube-ovn
kind create cluster --config yamls/kind.yaml --name kube-ovn
@for role in ${ROLES} ; do \
kind load docker-image --name kube-ovn ${REGISTRY}/kube-ovn-$$role:${RELEASE_TAG}; \
done
kind load docker-image --name kube-ovn ${REGISTRY}/kube-ovn:${RELEASE_TAG}
kubectl label node --all kube-ovn/role=master
kubectl apply -f yamls/crd.yaml
kubectl apply -f yamls/ovn-ha.yaml
kubectl apply -f yamls/kube-ovn.yaml
kind-reload:
@for role in ${ROLES} ; do \
kind load docker-image ${REGISTRY}/kube-ovn-$$role:${RELEASE_TAG}; \
done
kind load docker-image --name kube-ovn ${REGISTRY}/kube-ovn:${RELEASE_TAG}
kubectl delete pod -n kube-ovn --all
kind-clean:
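The Dockerfile hunks below apply the same consolidation to the image itself: the unified dist/images/Dockerfile now copies all component binaries (kube-ovn, kube-ovn-daemon, kube-ovn-controller, kube-ovn-pinger) and every start script into /kube-ovn, while the per-role Dockerfiles are removed. Since the merged image no longer bakes in one role, the role is chosen at run time via the container command. A hedged local smoke test (image tag taken from the manifests further down; the listing is only illustrative):

    # list the binaries and scripts shipped inside the consolidated image
    docker run --rm index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre ls /kube-ovn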
@@ -25,17 +25,24 @@ RUN rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERS
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm
RUN mkdir -p /var/run/openvswitch
RUN mkdir -p /var/run/openvswitch && \
mkdir -p /var/run/ovn && \
mkdir -p /etc/cni/net.d && \
mkdir -p /opt/cni/bin
ENV CNI_VERSION=v0.7.5
RUN curl -sSf -L --retry 5 https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz | tar -xz -C . ./loopback ./portmap
COPY start-cniserver.sh /kube-ovn/start-cniserver.sh
COPY install-cni.sh /kube-ovn/install-cni.sh
ENV KUBE_VERSION="v1.13.2"
RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o /usr/bin/kubectl \
&& chmod +x /usr/bin/kubectl
COPY *.sh /kube-ovn/
COPY 00-kube-ovn.conflist /kube-ovn/00-kube-ovn.conflist
WORKDIR /kube-ovn
CMD ["sh", "start-cniserver.sh"]
COPY kube-ovn /kube-ovn/kube-ovn
COPY kube-ovn-daemon /kube-ovn/kube-ovn-daemon
COPY kube-ovn-pinger /kube-ovn/kube-ovn-pinger
COPY kube-ovn-controller /kube-ovn/kube-ovn-controller
FROM centos:7
ENV PYTHONDONTWRITEBYTECODE yes
RUN yum install -y \
PyYAML bind-utils \
openssl \
numactl-libs \
firewalld-filesystem \
libpcap \
hostname \
iproute strace socat nc \
unbound unbound-devel \
tcpdump ipset \
epel-release https://centos7.iuscommunity.org/ius-release.rpm
RUN yum install python34 -y && ln -s /bin/python3.4 /bin/python3 && yum clean all
ENV OVS_VERSION=20.03.0
ENV OVS_SUBVERSION=1
RUN rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-devel-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm
RUN mkdir -p /var/run/openvswitch
WORKDIR /kube-ovn
CMD ["sh", "start-controller.sh"]
COPY start-controller.sh /kube-ovn/start-controller.sh
COPY kube-ovn-controller-healthcheck.sh /kube-ovn/kube-ovn-controller-healthcheck.sh
COPY kube-ovn-controller /kube-ovn/kube-ovn-controller
FROM centos:7
ENV PYTHONDONTWRITEBYTECODE yes
RUN yum install -y \
PyYAML bind-utils \
openssl \
numactl-libs \
firewalld-filesystem \
libpcap \
hostname \
iproute strace socat nc \
unbound unbound-devel \
tcpdump ipset \
epel-release https://centos7.iuscommunity.org/ius-release.rpm
RUN yum install python34 -y && ln -s /bin/python3.4 /bin/python3 && yum clean all
ENV OVS_VERSION=20.03.0
ENV OVS_SUBVERSION=1
RUN rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-devel-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm
RUN mkdir -p /var/run/openvswitch
ENV KUBE_VERSION="v1.13.2"
RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o /usr/bin/kubectl \
&& chmod +x /usr/bin/kubectl
COPY ovn-healthcheck.sh /root/ovn-healthcheck.sh
COPY ovn-is-leader.sh /root/ovn-is-leader.sh
COPY start-db.sh /root/start-db.sh
CMD ["/bin/bash", "/root/start-db.sh"]
FROM centos:7
ENV PYTHONDONTWRITEBYTECODE yes
RUN yum install -y \
PyYAML bind-utils \
openssl \
numactl-libs \
firewalld-filesystem \
libpcap \
hostname \
iproute strace socat nc \
unbound unbound-devel \
tcpdump ipset \
epel-release https://centos7.iuscommunity.org/ius-release.rpm
RUN yum install python34 -y && ln -s /bin/python3.4 /bin/python3 && yum clean all
ENV OVS_VERSION=20.03.0
ENV OVS_SUBVERSION=1
RUN rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-devel-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm
RUN mkdir -p /var/run/openvswitch
RUN mkdir -p /var/run/openvswitch && \
mkdir -p /etc/cni/net.d && \
mkdir -p /opt/cni/bin
COPY ovs-healthcheck.sh /root/ovs-healthcheck.sh
COPY start-ovs.sh /root/start-ovs.sh
CMD ["/bin/bash", "/root/start-ovs.sh"]
FROM centos:7
ENV PYTHONDONTWRITEBYTECODE yes
RUN yum install -y \
PyYAML bind-utils \
openssl \
numactl-libs \
firewalld-filesystem \
libpcap \
hostname \
iproute strace socat nc \
unbound unbound-devel \
tcpdump ipset \
epel-release https://centos7.iuscommunity.org/ius-release.rpm
RUN yum install python34 -y && ln -s /bin/python3.4 /bin/python3 && yum clean all
ENV OVS_VERSION=20.03.0
ENV OVS_SUBVERSION=1
RUN rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-devel-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm
RUN mkdir -p /var/run/openvswitch
WORKDIR /kube-ovn
CMD ["/kube-ovn/kube-ovn-pinger"]
COPY kube-ovn-pinger /kube-ovn/kube-ovn-pinger
RUN chmod +x /kube-ovn/kube-ovn-pinger
FROM centos:7
ENV PYTHONDONTWRITEBYTECODE yes
RUN yum install -y \
PyYAML bind-utils \
openssl \
numactl-libs \
firewalld-filesystem \
libpcap \
hostname \
iproute strace socat nc \
unbound unbound-devel \
tcpdump ipset \
epel-release https://centos7.iuscommunity.org/ius-release.rpm
RUN yum install python34 -y && ln -s /bin/python3.4 /bin/python3 && yum clean all
ENV OVS_VERSION=20.03.0
ENV OVS_SUBVERSION=1
RUN rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-devel-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm
RUN mkdir -p /var/run/openvswitch
WORKDIR /kube-ovn
CMD ["sh", "start-webhook.sh"]
COPY start-webhook.sh /kube-ovn/start-webhook.sh
COPY kube-ovn-webhook /kube-ovn/kube-ovn-webhook
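In the manifests that follow, every container that previously used a role-specific image now references the single kube-ovn image and selects its role through an explicit command (start-controller.sh, start-cniserver.sh, start-db.sh, start-ovs.sh, or the kube-ovn-pinger binary). Once the updated YAML is applied, a quick sanity check that all workloads picked up the consolidated image, assuming the kube-ovn namespace used by the Makefile's kind targets:

    kubectl -n kube-ovn get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'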
@@ -39,7 +39,7 @@ spec:
hostNetwork: true
containers:
- name: kube-ovn-controller
image: "index.alauda.cn/alaudak8s/kube-ovn-controller:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command:
- /kube-ovn/start-controller.sh
@@ -110,7 +110,7 @@ spec:
hostPID: true
initContainers:
- name: install-cni
image: "index.alauda.cn/alaudak8s/kube-ovn-cni:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/install-cni.sh"]
securityContext:
@@ -123,7 +123,7 @@ spec:
name: cni-bin
containers:
- name: cni-server
image: "index.alauda.cn/alaudak8s/kube-ovn-cni:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
command: ["sh", "/kube-ovn/start-cniserver.sh"]
args:
- --enable-mirror=false
@@ -206,8 +206,9 @@ spec:
hostPID: true
containers:
- name: pinger
image: "index.alauda.cn/alaudak8s/kube-ovn-pinger:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/kube-ovn-pinger"]
securityContext:
runAsUser: 0
privileged: false
@@ -39,7 +39,7 @@ spec:
hostNetwork: true
containers:
- name: kube-ovn-controller
image: "index.alauda.cn/alaudak8s/kube-ovn-controller:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command:
- /kube-ovn/start-controller.sh
@@ -108,7 +108,7 @@ spec:
hostPID: true
initContainers:
- name: install-cni
image: "index.alauda.cn/alaudak8s/kube-ovn-cni:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/install-cni.sh"]
securityContext:
@@ -121,7 +121,7 @@ spec:
name: cni-bin
containers:
- name: cni-server
image: "index.alauda.cn/alaudak8s/kube-ovn-cni:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command:
- sh
@@ -211,7 +211,7 @@ spec:
hostPID: true
containers:
- name: pinger
image: "index.alauda.cn/alaudak8s/kube-ovn-pinger:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
command: ["/kube-ovn/kube-ovn-pinger", "--external-address=114.114.114.114"]
imagePullPolicy: IfNotPresent
securityContext:
@@ -155,8 +155,9 @@ spec:
hostNetwork: true
containers:
- name: ovn-central
image: "index.alauda.cn/alaudak8s/kube-ovn-db:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/start-db.sh"]
securityContext:
capabilities:
add: ["SYS_NICE"]
@@ -197,13 +198,13 @@ spec:
exec:
command:
- sh
- /root/ovn-is-leader.sh
- /kube-ovn/ovn-is-leader.sh
periodSeconds: 3
livenessProbe:
exec:
command:
- sh
- /root/ovn-healthcheck.sh
- /kube-ovn/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
@@ -261,8 +262,9 @@ spec:
hostPID: true
containers:
- name: openvswitch
image: "index.alauda.cn/alaudak8s/kube-ovn-node:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/start-ovs.sh"]
securityContext:
runAsUser: 0
privileged: true
@@ -292,13 +294,13 @@ spec:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
- /kube-ovn/ovs-healthcheck.sh
periodSeconds: 5
livenessProbe:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
- /kube-ovn/ovs-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
@@ -156,8 +156,9 @@ spec:
hostNetwork: true
containers:
- name: ovn-central
image: "index.alauda.cn/alaudak8s/kube-ovn-db:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/start-db.sh"]
securityContext:
capabilities:
add: ["SYS_NICE"]
@@ -196,13 +197,13 @@ spec:
exec:
command:
- sh
- /root/ovn-is-leader.sh
- /kube-ovn/ovn-is-leader.sh
periodSeconds: 3
livenessProbe:
exec:
command:
- sh
- /root/ovn-healthcheck.sh
- /kube-ovn/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
@@ -260,8 +261,9 @@ spec:
hostPID: true
containers:
- name: openvswitch
image: "index.alauda.cn/alaudak8s/kube-ovn-node:v1.1.0-pre"
image: "index.alauda.cn/alaudak8s/kube-ovn:v1.1.0-pre"
imagePullPolicy: IfNotPresent
command: ["/kube-ovn/start-ovs.sh"]
securityContext:
runAsUser: 0
privileged: true
@@ -291,13 +293,13 @@ spec:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
- /kube-ovn/ovs-healthcheck.sh
periodSeconds: 5
livenessProbe:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
- /kube-ovn/ovs-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
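A side effect of the merge visible in the probes above: the ovn-is-leader.sh, ovn-healthcheck.sh, and ovs-healthcheck.sh scripts now live under /kube-ovn instead of /root, matching the COPY *.sh /kube-ovn/ step in the unified Dockerfile. A hedged spot check once the pods are up (pod name is a placeholder; namespace assumed from the Makefile's kind targets):

    kubectl -n kube-ovn exec <ovn-central-pod> -- ls /kube-ovn/ovn-is-leader.sh /kube-ovn/ovn-healthcheck.sh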