Commit 730e4f17 authored by zhangzujian's avatar zhangzujian
Browse files

improve support for dual-stack

parent 0c252cd3
Showing with 350 additions and 200 deletions
+350 -200
......@@ -473,6 +473,55 @@ jobs:
sudo chmod 666 /home/runner/.kube/config
make e2e-underlay-single-nic
dual-stack-e2e:
needs: build
name: dual-stack-e2e
runs-on: ubuntu-18.04
timeout-minutes: 30
steps:
- name: Check out code
uses: actions/checkout@v2
- name: Install Kind
run: |
curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-$(uname)-amd64
chmod +x ./kind
sudo mv kind /usr/local/bin
- name: Init Kind
run: |
pip install j2cli --user
pip install "j2cli[yaml]" --user
sudo PATH=~/.local/bin:$PATH make kind-init-dual
- name: Download image
uses: actions/download-artifact@v2
with:
name: image
- name: Load Image
run: |
docker load --input image.tar
- name: Install Kube-OVN
run: |
docker load --input image.tar
sudo make kind-install-dual
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.16
id: go
- name: Run E2E
run: |
go get -u github.com/onsi/ginkgo/ginkgo
go get -u github.com/onsi/gomega/...
sudo kubectl cluster-info
sudo chmod 666 /home/runner/.kube/config
make e2e
no-lb-e2e:
needs: build
name: disable-loadbalancer-e2e
......@@ -625,6 +674,7 @@ jobs:
- ipv6-e2e
- ipv6-vlan-e2e
- ipv6-underlay-e2e-single-nic
- dual-stack-e2e
- no-lb-e2e
- no-lb-iptables-e2e
- no-np-e2e
......
GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
GO_VERSION = 1.16
REGISTRY = kubeovn
......@@ -155,7 +154,7 @@ kind-install-single:
kind-install-ipv6:
kind load docker-image --name kube-ovn $(REGISTRY)/kube-ovn:$(RELEASE_TAG)
kubectl taint node kube-ovn-control-plane node-role.kubernetes.io/master:NoSchedule-
ENABLE_SSL=true IPv6=true dist/images/install.sh
ENABLE_SSL=true IPV6=true dist/images/install.sh
.PHONY: kind-install-underlay-ipv6
kind-install-underlay-ipv6:
......@@ -170,13 +169,13 @@ kind-install-underlay-ipv6:
@chmod +x install-underlay.sh
kind load docker-image --name kube-ovn $(REGISTRY)/kube-ovn:$(RELEASE_TAG)
kubectl taint node kube-ovn-control-plane node-role.kubernetes.io/master:NoSchedule-
ENABLE_SSL=true IPv6=true ENABLE_VLAN=true VLAN_NIC=eth0 ./install-underlay.sh
ENABLE_SSL=true IPV6=true ENABLE_VLAN=true VLAN_NIC=eth0 ./install-underlay.sh
.PHONY: kind-install-dual
kind-install-dual:
kind load docker-image --name kube-ovn $(REGISTRY)/kube-ovn:$(RELEASE_TAG)
kubectl taint node kube-ovn-control-plane node-role.kubernetes.io/master:NoSchedule-
ENABLE_SSL=true DualStack=true dist/images/install.sh
ENABLE_SSL=true DUAL_STACK=true dist/images/install.sh
kubectl describe no
.PHONY: kind-reload
......@@ -196,8 +195,8 @@ uninstall:
.PHONY: lint
lint:
@gofmt -d $(GOFILES_NOVENDOR)
@if [ $$(gofmt -l $(GOFILES_NOVENDOR) | wc -l) -ne 0 ]; then \
@gofmt -d .
@if [ $$(gofmt -l . | wc -l) -ne 0 ]; then \
echo "Code differs from gofmt's style" 1>&2 && exit 1; \
fi
@GOOS=linux go vet ./...
......
......@@ -4,12 +4,13 @@ import (
"os"
"strings"
"k8s.io/klog"
"github.com/kubeovn/kube-ovn/cmd/controller"
"github.com/kubeovn/kube-ovn/cmd/daemon"
"github.com/kubeovn/kube-ovn/cmd/ovn_monitor"
"github.com/kubeovn/kube-ovn/cmd/pinger"
"github.com/kubeovn/kube-ovn/cmd/speaker"
"k8s.io/klog"
)
const (
......
......@@ -3,21 +3,20 @@ package controller
import (
"context"
"fmt"
v1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"net/http"
_ "net/http/pprof" // #nosec
"os"
"time"
"github.com/kubeovn/kube-ovn/versions"
"github.com/prometheus/client_golang/prometheus/promhttp"
v1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/sample-controller/pkg/signals"
"github.com/kubeovn/kube-ovn/pkg/controller"
"github.com/kubeovn/kube-ovn/pkg/ovs"
"k8s.io/klog"
"k8s.io/sample-controller/pkg/signals"
"github.com/kubeovn/kube-ovn/versions"
)
func CmdMain() {
......
......@@ -3,11 +3,11 @@ package ovn_monitor
import (
"net/http"
"github.com/kubeovn/kube-ovn/versions"
ovn "github.com/kubeovn/kube-ovn/pkg/ovnmonitor"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/klog"
ovn "github.com/kubeovn/kube-ovn/pkg/ovnmonitor"
"github.com/kubeovn/kube-ovn/versions"
)
func CmdMain() {
......
......@@ -5,11 +5,11 @@ import (
"net/http"
_ "net/http/pprof" // #nosec
"github.com/kubeovn/kube-ovn/versions"
"github.com/kubeovn/kube-ovn/pkg/pinger"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/klog"
"github.com/kubeovn/kube-ovn/pkg/pinger"
"github.com/kubeovn/kube-ovn/versions"
)
func CmdMain() {
......
......@@ -4,11 +4,12 @@ import (
"fmt"
"net/http"
"github.com/kubeovn/kube-ovn/pkg/speaker"
"github.com/kubeovn/kube-ovn/versions"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/klog"
"k8s.io/sample-controller/pkg/signals"
"github.com/kubeovn/kube-ovn/pkg/speaker"
"github.com/kubeovn/kube-ovn/versions"
)
func CmdMain() {
......
......@@ -6,12 +6,6 @@ import (
"os"
"time"
"github.com/kubeovn/kube-ovn/versions"
ovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/ovs"
ovnwebhook "github.com/kubeovn/kube-ovn/pkg/webhook"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
......@@ -19,6 +13,11 @@ import (
"k8s.io/klog/klogr"
ctrl "sigs.k8s.io/controller-runtime"
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
ovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/ovs"
ovnwebhook "github.com/kubeovn/kube-ovn/pkg/webhook"
"github.com/kubeovn/kube-ovn/versions"
)
const (
......
#!/usr/bin/env bash
set -euo pipefail
IPv6=${IPv6:-false}
IPV6=${IPV6:-false}
ENABLE_SSL=${ENABLE_SSL:-false}
ENABLE_VLAN=${ENABLE_VLAN:-false}
ENABLE_MIRROR=${ENABLE_MIRROR:-false}
......@@ -25,7 +25,7 @@ SVC_CIDR="10.96.0.0/12" # Do NOT overlap with NODE/POD/JOIN CIDR
JOIN_CIDR="100.64.0.0/16" # Do NOT overlap with NODE/POD/SVC CIDR
PINGER_EXTERNAL_ADDRESS="114.114.114.114" # Pinger check external ip probe
PINGER_EXTERNAL_DOMAIN="alauda.cn" # Pinger check external domain probe
if [ "$IPv6" = "true" ]; then
if [ "$IPV6" = "true" ]; then
POD_CIDR="fd00:10:16::/64" # Do NOT overlap with NODE/SVC/JOIN CIDR
POD_GATEWAY="fd00:10:16::1"
SVC_CIDR="fd00:10:96::/112" # Do NOT overlap with NODE/POD/JOIN CIDR
......
#!/usr/bin/env bash
set -euo pipefail
IPv6=${IPv6:-false}
DualStack=${DualStack:-false}
IPV6=${IPV6:-false}
DUAL_STACK=${DUAL_STACK:-false}
ENABLE_SSL=${ENABLE_SSL:-false}
ENABLE_VLAN=${ENABLE_VLAN:-false}
ENABLE_MIRROR=${ENABLE_MIRROR:-false}
......@@ -26,7 +26,8 @@ SVC_CIDR="10.96.0.0/12" # Do NOT overlap with NODE/POD/JOIN CIDR
JOIN_CIDR="100.64.0.0/16" # Do NOT overlap with NODE/POD/SVC CIDR
PINGER_EXTERNAL_ADDRESS="114.114.114.114" # Pinger check external ip probe
PINGER_EXTERNAL_DOMAIN="alauda.cn" # Pinger check external domain probe
if [ "$IPv6" = "true" ]; then
SVC_YAML_IPFAMILYPOLICY=""
if [ "$IPV6" = "true" ]; then
POD_CIDR="fd00:10:16::/64" # Do NOT overlap with NODE/SVC/JOIN CIDR
POD_GATEWAY="fd00:10:16::1"
SVC_CIDR="fd00:10:96::/112" # Do NOT overlap with NODE/POD/JOIN CIDR
......@@ -34,13 +35,14 @@ if [ "$IPv6" = "true" ]; then
PINGER_EXTERNAL_ADDRESS="2400:3200::1"
PINGER_EXTERNAL_DOMAIN="google.com"
fi
if [ "$DualStack" = "true" ]; then
if [ "$DUAL_STACK" = "true" ]; then
POD_CIDR="10.16.0.0/16,fd00:10:16::/64" # Do NOT overlap with NODE/SVC/JOIN CIDR
POD_GATEWAY="10.16.0.1,fd00:10:16::1"
SVC_CIDR="10.96.0.0/12" # Do NOT overlap with NODE/POD/JOIN CIDR
JOIN_CIDR="100.64.0.0/16,fd00:100:64::/64" # Do NOT overlap with NODE/POD/SVC CIDR
PINGER_EXTERNAL_ADDRESS="114.114.114.114"
PINGER_EXTERNAL_DOMAIN="google.com"
SVC_YAML_IPFAMILYPOLICY="ipFamilyPolicy: PreferDualStack"
fi
EXCLUDE_IPS="" # EXCLUDE_IPS for default subnet
......@@ -927,6 +929,7 @@ spec:
port: 6641
targetPort: 6641
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-nb-leader: "true"
......@@ -945,6 +948,7 @@ spec:
port: 6642
targetPort: 6642
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-sb-leader: "true"
......@@ -963,6 +967,7 @@ spec:
port: 6643
targetPort: 6643
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-northd-leader: "true"
......@@ -1408,6 +1413,7 @@ spec:
port: 6641
targetPort: 6641
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-nb-leader: "true"
......@@ -1425,6 +1431,7 @@ spec:
port: 6642
targetPort: 6642
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-sb-leader: "true"
......@@ -1442,6 +1449,7 @@ spec:
port: 6643
targetPort: 6643
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: ovn-central
ovn-northd-leader: "true"
......@@ -2233,6 +2241,7 @@ spec:
- name: metrics
port: 10661
type: ClusterIP
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-monitor
sessionAffinity: None
......@@ -2245,6 +2254,7 @@ metadata:
labels:
app: kube-ovn-pinger
spec:
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-pinger
ports:
......@@ -2259,6 +2269,7 @@ metadata:
labels:
app: kube-ovn-controller
spec:
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-controller
ports:
......@@ -2273,6 +2284,7 @@ metadata:
labels:
app: kube-ovn-cni
spec:
${SVC_YAML_IPFAMILYPOLICY}
selector:
app: kube-ovn-cni
ports:
......@@ -2373,7 +2385,22 @@ trace(){
namespace="default"
fi
podIP=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/ip_address})
dst="$2"
if [ -z "$dst" ]; then
echo "need a target ip address"
exit 1
fi
af="4"
nw="nw"
proto=""
if [[ "$dst" =~ .*:.* ]]; then
af="6"
nw="ipv6"
proto="6"
fi
podIPs=($(kubectl get pod "$podName" -n "$namespace" -o jsonpath="{.status.podIPs[*].ip}"))
mac=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
ls=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_switch})
hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
......@@ -2389,6 +2416,24 @@ trace(){
exit 1
fi
podIP=""
for ip in ${podIPs[@]}; do
if [ "$af" = "4" ]; then
if [[ ! "$ip" =~ .*:.* ]]; then
podIP=$ip
break
fi
elif [[ "$ip" =~ .*:.* ]]; then
podIP=$ip
break
fi
done
if [ -z "$podIP" ]; then
echo "Pod has no IPv$af address"
exit 1
fi
gwMac=""
if [ ! -z "$(kubectl get subnet $ls -o jsonpath={.spec.vlan})" ]; then
ovnCni=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep -w kube-ovn-cni | grep " $nodeName " | awk '{print $1}')
......@@ -2431,23 +2476,7 @@ trace(){
exit 1
fi
dst="$2"
if [ -z "$dst" ]; then
echo "need a target ip address"
exit 1
fi
type="$3"
af="4"
nw="nw"
proto=""
if [[ "$podIP" =~ .*:.* ]]; then
af="6"
nw="ipv6"
proto="6"
fi
case $type in
icmp)
set -x
......
......@@ -70,7 +70,22 @@ trace(){
namespace="default"
fi
podIP=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/ip_address})
dst="$2"
if [ -z "$dst" ]; then
echo "need a target ip address"
exit 1
fi
af="4"
nw="nw"
proto=""
if [[ "$dst" =~ .*:.* ]]; then
af="6"
nw="ipv6"
proto="6"
fi
podIPs=($(kubectl get pod "$podName" -n "$namespace" -o jsonpath="{.status.podIPs[*].ip}"))
mac=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/mac_address})
ls=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.metadata.annotations.ovn\\.kubernetes\\.io/logical_switch})
hostNetwork=$(kubectl get pod "$podName" -n "$namespace" -o jsonpath={.spec.hostNetwork})
......@@ -86,6 +101,24 @@ trace(){
exit 1
fi
podIP=""
for ip in ${podIPs[@]}; do
if [ "$af" = "4" ]; then
if [[ ! "$ip" =~ .*:.* ]]; then
podIP=$ip
break
fi
elif [[ "$ip" =~ .*:.* ]]; then
podIP=$ip
break
fi
done
if [ -z "$podIP" ]; then
echo "Pod has no IPv$af address"
exit 1
fi
gwMac=""
if [ ! -z "$(kubectl get subnet $ls -o jsonpath={.spec.vlan})" ]; then
ovnCni=$(kubectl get pod -n $KUBE_OVN_NS -o wide | grep -w kube-ovn-cni | grep " $nodeName " | awk '{print $1}')
......@@ -128,23 +161,7 @@ trace(){
exit 1
fi
dst="$2"
if [ -z "$dst" ]; then
echo "need a target ip address"
exit 1
fi
type="$3"
af="4"
nw="nw"
proto=""
if [[ "$podIP" =~ .*:.* ]]; then
af="6"
nw="ipv6"
proto="6"
fi
case $type in
icmp)
set -x
......
......@@ -204,9 +204,10 @@ func (c *Controller) handleAddNode(key string) error {
return err
}
nodeIP := util.GetNodeInternalIP(*node)
nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(*node)
for _, subnet := range subnets {
if subnet.Spec.Vlan == "" && subnet.Spec.Vpc == util.DefaultVpc && util.CIDRContainIP(subnet.Spec.CIDRBlock, nodeIP) {
if subnet.Spec.Vlan == "" && subnet.Spec.Vpc == util.DefaultVpc &&
(util.CIDRContainIP(subnet.Spec.CIDRBlock, nodeIPv4) || util.CIDRContainIP(subnet.Spec.CIDRBlock, nodeIPv6)) {
msg := fmt.Sprintf("internal IP address of node %s is in CIDR of subnet %s, this may result in network issues", node.Name, subnet.Name)
klog.Warning(msg)
c.recorder.Eventf(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: node.Name, UID: types.UID(node.Name)}}, v1.EventTypeWarning, "NodeAddressConflictWithSubnet", msg)
......@@ -269,12 +270,17 @@ func (c *Controller) handleAddNode(key string) error {
return err
}
// There is only one nodeAddr temp
nodeAddr := util.GetNodeInternalIP(*node)
for _, ip := range strings.Split(ipStr, ",") {
if util.CheckProtocol(nodeAddr) == util.CheckProtocol(ip) {
err = c.ovnClient.AddStaticRoute("", nodeAddr, ip, c.config.ClusterRouter, util.NormalRouteType)
if err != nil {
if ip == "" {
continue
}
nodeIP := nodeIPv4
if util.CheckProtocol(ip) == kubeovnv1.ProtocolIPv6 {
nodeIP = nodeIPv6
}
if nodeIP != "" {
if err = c.ovnClient.AddStaticRoute("", nodeIP, ip, c.config.ClusterRouter, util.NormalRouteType); err != nil {
klog.Errorf("failed to add static router from node to ovn0: %v", err)
return err
}
......
......@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/labels"
"os"
"os/exec"
"reflect"
......@@ -13,6 +12,7 @@ import (
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
......@@ -77,7 +77,13 @@ func (c *Controller) resyncInterConnection() {
return
}
for _, node := range nodes {
blackList = append(blackList, util.GetNodeInternalIP(*node))
ipv4, ipv6 := util.GetNodeInternalIP(*node)
if ipv4 != "" {
blackList = append(blackList, ipv4)
}
if ipv6 != "" {
blackList = append(blackList, ipv6)
}
}
if err := c.ovnClient.SetICAutoRoute(autoRoute, blackList); err != nil {
klog.Errorf("failed to config auto route, %v", err)
......
......@@ -1005,16 +1005,14 @@ func (c *Controller) validatePodIP(podName, subnetName, ipv4, ipv6 string) (bool
}
for _, node := range nodes {
if nodeIP := util.GetNodeInternalIP(*node); nodeIP != "" {
msg := fmt.Sprintf("IP address (%s) assigned to pod %s is the same with internal IP address of node %s, reallocating...", nodeIP, podName, node.Name)
if nodeIP == ipv4 {
klog.Error(msg)
return false, true, nil
}
if nodeIP == ipv6 {
klog.Error(msg)
return true, false, nil
}
nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(*node)
if ipv4 != "" && ipv4 == nodeIPv4 {
klog.Errorf("IP address (%s) assigned to pod %s is the same with internal IP address of node %s, reallocating...", ipv4, podName, node.Name)
return false, true, nil
}
if ipv6 != "" && ipv6 == nodeIPv6 {
klog.Errorf("IP address (%s) assigned to pod %s is the same with internal IP address of node %s, reallocating...", ipv6, podName, node.Name)
return true, false, nil
}
}
}
......
......@@ -65,8 +65,7 @@ type Controller struct {
iptable map[string]*iptables.IPTables
ipset map[string]*ipsets.IPSets
protocol string
internalIP string
protocol string
}
// NewController init a daemon controller
......@@ -109,7 +108,6 @@ func NewController(config *Configuration, podInformerFactory informers.SharedInf
return nil, err
}
controller.protocol = util.CheckProtocol(node.Annotations[util.IpAddressAnnotation])
controller.internalIP = util.GetNodeInternalIP(*node)
controller.iptable = make(map[string]*iptables.IPTables)
controller.ipset = make(map[string]*ipsets.IPSets)
......
......@@ -397,7 +397,12 @@ func (c *Controller) setIptables() error {
return err
}
hostIP := util.GetNodeInternalIP(*node)
nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(*node)
nodeIPs := map[string]string{
kubeovnv1.ProtocolIPv4: nodeIPv4,
kubeovnv1.ProtocolIPv6: nodeIPv6,
}
subnetNatips, err := c.getEgressNatIpByNode(c.config.NodeName)
if err != nil {
klog.Errorf("failed to get centralized subnets nat ips on node %s, %v", c.config.NodeName, err)
......@@ -409,12 +414,12 @@ func (c *Controller) setIptables() error {
v4AbandonedRules = []util.IPTableRule{
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set ! --match-set ovn40subnets src -m set ! --match-set ovn40other-node src -m set --match-set ovn40local-pod-ip-nat dst -j RETURN`)},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set ! --match-set ovn40subnets src -m set ! --match-set ovn40other-node src -m set --match-set ovn40subnets-nat dst -j RETURN`)},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`-o ovn0 ! -s %s -m mark --mark 0x4000/0x4000 -j MASQUERADE`, hostIP))},
// {Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`-o ovn0 ! -s %s -m mark --mark 0x4000/0x4000 -j MASQUERADE`, nodeIPv4))},
}
v6AbandonedRules = []util.IPTableRule{
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60local-pod-ip-nat dst -j RETURN`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60subnets-nat dst -j RETURN`, " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Split(fmt.Sprintf(`-o ovn0 ! -s %s -m mark --mark 0x4000/0x4000 -j MASQUERADE`, hostIP), " ")},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60local-pod-ip-nat dst -j RETURN`)},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set ! --match-set ovn60subnets src -m set ! --match-set ovn60other-node src -m set --match-set ovn60subnets-nat dst -j RETURN`)},
// {Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`-o ovn0 ! -s %s -m mark --mark 0x4000/0x4000 -j MASQUERADE`, nodeIPv6))},
}
v4Rules = []util.IPTableRule{
......@@ -422,7 +427,7 @@ func (c *Controller) setIptables() error {
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set --match-set ovn40subnets-nat src -m set ! --match-set ovn40subnets dst -j MASQUERADE`)},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set --match-set ovn40local-pod-ip-nat src -m set ! --match-set ovn40subnets dst -j MASQUERADE`)},
// external traffic to overlay pod or to service
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`! -s %s -m set --match-set ovn40subnets dst -j MASQUERADE`, hostIP))},
// {Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`! -s %s -m set --match-set ovn40subnets dst -j MASQUERADE`, nodeIPv4))},
// masq traffic from overlay pod to service
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m mark --mark 0x40000/0x40000 -j MASQUERADE`)},
// mark traffic from overlay pod to service
......@@ -445,7 +450,7 @@ func (c *Controller) setIptables() error {
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set --match-set ovn60subnets-nat src -m set ! --match-set ovn60subnets dst -j MASQUERADE`)},
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m set --match-set ovn60local-pod-ip-nat src -m set ! --match-set ovn60subnets dst -j MASQUERADE`)},
// external traffic to overlay pod or to service
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`! -s %s -m set --match-set ovn60subnets dst -j MASQUERADE`, hostIP))},
// {Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`! -s %s -m set --match-set ovn60subnets dst -j MASQUERADE`, nodeIPv6))},
// masq traffic from overlay pod to service
{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(`-m mark --mark 0x40000/0x40000 -j MASQUERADE`)},
// mark traffic from overlay pod to service
......@@ -492,6 +497,16 @@ func (c *Controller) setIptables() error {
matchset = "ovn60subnets"
}
if nodeIP := nodeIPs[protocol]; nodeIP != "" {
abandonedRules = append(abandonedRules, util.IPTableRule{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`-o ovn0 ! -s %s -m mark --mark 0x4000/0x4000 -j MASQUERADE`, nodeIP))})
rules := make([]util.IPTableRule, len(iptableRules)+1)
copy(rules[:2], iptableRules[:2])
rules[2] = util.IPTableRule{Table: "nat", Chain: "POSTROUTING", Rule: strings.Fields(fmt.Sprintf(`! -s %s -m set --match-set %s dst -j MASQUERADE`, nodeIP, matchset))}
copy(rules[3:], iptableRules[2:])
iptableRules = rules
}
// delete abandoned iptables rules
for _, iptRule := range abandonedRules {
exists, err := c.iptable[protocol].Exists(iptRule.Table, iptRule.Chain, iptRule.Rule...)
......@@ -535,11 +550,6 @@ func (c *Controller) setIptables() error {
}
for _, iptRule := range iptableRules {
if util.ContainsString(iptRule.Rule, "ovn0") && protocol != util.CheckProtocol(hostIP) {
klog.V(3).Infof("ignore check iptable rule, protocol %v, hostIP %v", protocol, hostIP)
continue
}
exists, err := c.iptable[protocol].Exists(iptRule.Table, iptRule.Chain, iptRule.Rule...)
if err != nil {
klog.Errorf("check iptable rule exist failed, %+v", err)
......
package util
import v1 "k8s.io/api/core/v1"
import (
"strings"
func GetNodeInternalIP(node v1.Node) string {
var nodeAddr string
v1 "k8s.io/api/core/v1"
)
func GetNodeInternalIP(node v1.Node) (ipv4, ipv6 string) {
var ips []string
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeInternalIP {
nodeAddr = addr.Address
break
ips = append(ips, addr.Address)
}
}
return nodeAddr
return SplitStringIP(strings.Join(ips, ","))
}
......@@ -20,6 +20,18 @@ import (
"github.com/kubeovn/kube-ovn/test/e2e/framework"
)
// nodeIPs collects the node's internal IP addresses as a slice,
// IPv4 first and then IPv6, skipping whichever address families
// the node does not have.
func nodeIPs(node corev1.Node) []string {
	ipv4, ipv6 := util.GetNodeInternalIP(node)
	var ips []string
	for _, ip := range []string{ipv4, ipv6} {
		if ip != "" {
			ips = append(ips, ip)
		}
	}
	return ips
}
func curlArgs(ip string, port int32) string {
if util.CheckProtocol(ip) == kubeovn.ProtocolIPv6 {
ip = fmt.Sprintf("-g -6 [%s]", ip)
......@@ -71,22 +83,26 @@ var _ = Describe("[Service]", func() {
Context("service with host network endpoints", func() {
It("container to ClusterIP", func() {
ip, port := hostService.Spec.ClusterIP, hostService.Spec.Ports[0].Port
for _, pod := range containerPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
port := hostService.Spec.Ports[0].Port
for _, ip := range hostService.Spec.ClusterIPs {
for _, pod := range containerPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
It("host to ClusterIP", func() {
ip, port := hostService.Spec.ClusterIP, hostService.Spec.Ports[0].Port
for _, pod := range hostPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
port := hostService.Spec.Ports[0].Port
for _, ip := range hostService.Spec.ClusterIPs {
for _, pod := range hostPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
......@@ -96,11 +112,12 @@ var _ = Describe("[Service]", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
}
})
......@@ -111,11 +128,12 @@ var _ = Describe("[Service]", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
}
})
......@@ -125,33 +143,38 @@ var _ = Describe("[Service]", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("curl", strings.Fields(curlArgs(nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("curl", strings.Fields(curlArgs(nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
})
Context("service with container network endpoints", func() {
It("container to ClusterIP", func() {
ip, port := hostService.Spec.ClusterIP, hostService.Spec.Ports[0].Port
for _, pod := range containerPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
port := hostService.Spec.Ports[0].Port
for _, ip := range hostService.Spec.ClusterIPs {
for _, pod := range containerPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
It("host to ClusterIP", func() {
ip, port := hostService.Spec.ClusterIP, hostService.Spec.Ports[0].Port
for _, pod := range hostPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
port := hostService.Spec.Ports[0].Port
for _, ip := range hostService.Spec.ClusterIPs {
for _, pod := range hostPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
......@@ -161,11 +184,12 @@ var _ = Describe("[Service]", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
}
})
......@@ -176,11 +200,12 @@ var _ = Describe("[Service]", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
}
})
......@@ -190,33 +215,38 @@ var _ = Describe("[Service]", func() {
nodes, err := f.KubeClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodes.Items {
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("curl", strings.Fields(curlArgs(nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("curl", strings.Fields(curlArgs(nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
})
Context("service with local external traffic policy", func() {
It("container to ClusterIP", func() {
ip, port := hostService.Spec.ClusterIP, hostService.Spec.Ports[0].Port
for _, pod := range containerPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
port := hostService.Spec.Ports[0].Port
for _, ip := range hostService.Spec.ClusterIPs {
for _, pod := range containerPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
It("host to ClusterIP", func() {
ip, port := hostService.Spec.ClusterIP, hostService.Spec.Ports[0].Port
for _, pod := range hostPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
port := hostService.Spec.Ports[0].Port
for _, ip := range hostService.Spec.ClusterIPs {
for _, pod := range hostPods.Items {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, ip, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
}
}
})
......@@ -243,15 +273,16 @@ var _ = Describe("[Service]", func() {
}
}
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
if hasEndpoint {
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
} else {
Expect(err).To(HaveOccurred())
Expect(outputStr).To(HavePrefix("000"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
if hasEndpoint {
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
} else {
Expect(err).To(HaveOccurred())
Expect(outputStr).To(HavePrefix("000"))
}
}
}
}
......@@ -282,15 +313,16 @@ var _ = Describe("[Service]", func() {
}
}
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
if shouldSucceed {
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
} else {
Expect(err).To(HaveOccurred())
Expect(outputStr).To(HavePrefix("000"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("kubectl", strings.Fields(kubectlArgs(pod.Name, nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
if shouldSucceed {
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
} else {
Expect(err).To(HaveOccurred())
Expect(outputStr).To(HavePrefix("000"))
}
}
}
}
......@@ -318,15 +350,16 @@ var _ = Describe("[Service]", func() {
}
}
nodeIP := util.GetNodeInternalIP(node)
output, err := exec.Command("curl", strings.Fields(curlArgs(nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
if hasEndpoint {
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
} else {
Expect(err).To(HaveOccurred())
Expect(outputStr).To(Equal("000"))
for _, nodeIP := range nodeIPs(node) {
output, err := exec.Command("curl", strings.Fields(curlArgs(nodeIP, port))...).CombinedOutput()
outputStr := string(bytes.TrimSpace(output))
if hasEndpoint {
Expect(err).NotTo(HaveOccurred(), outputStr)
Expect(outputStr).To(Equal("200"))
} else {
Expect(err).To(HaveOccurred())
Expect(outputStr).To(Equal("000"))
}
}
}
})
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment