Bumping k8s dependencies to 1.13

This commit is contained in:
Cheng Xing
2018-11-16 14:08:25 -08:00
parent 305407125c
commit b4c0b68ec7
8002 changed files with 884099 additions and 276228 deletions

View File

@@ -14,7 +14,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/ipvs/testing:go_default_library",
"//pkg/proxy/util:go_default_library",
@@ -25,10 +24,11 @@ go_test(
"//pkg/util/iptables/testing:go_default_library",
"//pkg/util/ipvs:go_default_library",
"//pkg/util/ipvs/testing:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
@@ -37,48 +37,15 @@ go_test(
go_library(
name = "go_default_library",
srcs = [
"graceful_termination.go",
"ipset.go",
"netlink.go",
"netlink_linux.go",
"netlink_unsupported.go",
"proxier.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"netlink_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"netlink_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"netlink_unsupported.go",
],
"//conditions:default": [],
}),
],
importpath = "k8s.io/kubernetes/pkg/proxy/ipvs",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
"//pkg/proxy/metrics:go_default_library",
@@ -91,12 +58,12 @@ go_library(
"//pkg/util/net:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [

View File

@@ -2,6 +2,7 @@ reviewers:
- thockin
- brendandburns
- m1093782566
- Lion-Wei
approvers:
- thockin
- brendandburns

View File

@@ -27,177 +27,236 @@ IPVS runs on a host and acts as a load balancer in front of a cluster of real se
and UDP-based services to the real servers, and make services of real servers appear as virtual services on a single IP address.
## IPVS vs. IPTABLES
IPVS mode was introduced in Kubernetes v1.8 and went beta in v1.9. IPTABLES mode was added in v1.1 and became the default operating mode in v1.2. Both IPVS and IPTABLES are based on `netfilter`.
IPVS mode was introduced in Kubernetes v1.8, went beta in v1.9, and GA in v1.11. IPTABLES mode was added in v1.1 and became the default operating mode in v1.2. Both IPVS and IPTABLES are based on `netfilter`.
Differences between IPVS mode and IPTABLES mode are as follows:
1. IPVS provides better scalability and performance for large clusters.
2. IPVS supports more sophisticated load balancing algorithms than iptables (least load, least connections, locality, weighted, etc.).
3. IPVS supports server health checking and connection retries, etc.
### When ipvs falls back to iptables
IPVS proxier will employ iptables for packet filtering, SNAT and supporting NodePort type services. Specifically, the ipvs proxier will fall back on iptables in the following 4 scenarios.
IPVS proxier will employ iptables for packet filtering, SNAT or masquerade.
Specifically, the ipvs proxier uses ipset to store the source or destination addresses of traffic that needs to be dropped or masqueraded, so that the number of iptables rules stays constant no matter how many services there are.
Here is the table of ipset sets that the ipvs proxier uses.
| set name | members | usage |
| :----------------------------- | ---------------------------------------- | ---------------------------------------- |
| KUBE-CLUSTER-IP | All service IP + port | Mark-Masq for cases where `masquerade-all=true` or `clusterCIDR` is specified |
| KUBE-LOOP-BACK | All service IP + port + IP | masquerade for resolving hairpin traffic |
| KUBE-EXTERNAL-IP | service external IP + port | masquerade for packets to external IPs |
| KUBE-LOAD-BALANCER | load balancer ingress IP + port | masquerade for packets to load balancer type services |
| KUBE-LOAD-BALANCER-LOCAL | LB ingress IP + port with `externalTrafficPolicy=local` | accept packets to load balancers with `externalTrafficPolicy=local` |
| KUBE-LOAD-BALANCER-FW | load balancer ingress IP + port with `loadBalancerSourceRanges` | packet filter for load balancers with `loadBalancerSourceRanges` specified |
| KUBE-LOAD-BALANCER-SOURCE-CIDR | load balancer ingress IP + port + source CIDR | packet filter for load balancers with `loadBalancerSourceRanges` specified |
| KUBE-NODE-PORT-TCP | NodePort type service TCP port | masquerade for packets to nodePort(TCP) |
| KUBE-NODE-PORT-LOCAL-TCP | NodePort type service TCP port with `externalTrafficPolicy=local` | accept packets to NodePort services with `externalTrafficPolicy=local` |
| KUBE-NODE-PORT-UDP | NodePort type service UDP port | masquerade for packets to nodePort(UDP) |
| KUBE-NODE-PORT-LOCAL-UDP | NodePort type service UDP port with `externalTrafficPolicy=local` | accept packets to NodePort services with `externalTrafficPolicy=local` |
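These sets can be inspected directly on a node. For example (a minimal sketch, assuming `ipset` is installed and the ipvs proxier is running):
```shell
# list the members of one of the sets maintained by the ipvs proxier
ipset list KUBE-CLUSTER-IP
```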
IPVS proxier will fall back on iptables in the following scenarios.
**1. kube-proxy starts with --masquerade-all=true**
If kube-proxy starts with `--masquerade-all=true`, the ipvs proxier will masquerade all traffic accessing a service Cluster IP, which behaves the same as the iptables proxier. Suppose there is a service with Cluster IP `10.244.5.1` and port `8080`; the iptables rules installed by the ipvs proxier should then look like what is shown below.
If kube-proxy starts with `--masquerade-all=true`, the ipvs proxier will masquerade all traffic accessing a service Cluster IP, which behaves the same as the iptables proxier. Suppose kube-proxy has the flag `--masquerade-all=true` specified; the iptables rules installed by the ipvs proxier should then look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
target prot opt source destination
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
```
**2. Specify cluster CIDR in kube-proxy startup**
If kube-proxy starts with `--cluster-cidr=<cidr>`, the ipvs proxier will masquerade off-cluster traffic accessing a service Cluster IP, which behaves the same as the iptables proxier. Suppose kube-proxy is provided with the cluster CIDR `10.244.16.0/24`, and the service Cluster IP is `10.244.5.1` with port `8080`; the iptables rules installed by the ipvs proxier should then look like what is shown below.
If kube-proxy starts with `--cluster-cidr=<cidr>`, the ipvs proxier will masquerade off-cluster traffic accessing a service Cluster IP, which behaves the same as the iptables proxier. Suppose kube-proxy is provided with the cluster CIDR `10.244.16.0/24`; the iptables rules installed by the ipvs proxier should then look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !10.244.16.0/24 10.244.5.1 /* default/foo:http cluster IP */ tcp dpt:8080
```
**3. Load Balancer Source Ranges is specified for LB type service**
When a service's `LoadBalancerStatus.ingress.IP` is not empty and the service's `LoadBalancerSourceRanges` is specified, the ipvs proxier will install iptables rules that look like what is shown below.
Suppose service's `LoadBalancerStatus.ingress.IP` is `10.96.1.2` and service's `LoadBalancerSourceRanges` is `10.120.2.0/24`.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
Chain KUBE-MARK-DROP (0 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (6 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-SERVICES (2 references)
target prot opt source destination
ACCEPT tcp -- 10.120.2.0/24 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
DROP tcp -- 0.0.0.0/0 10.96.1.2 /* default/foo:http loadbalancer IP */ tcp dpt:8080
```
**4. Support NodePort type service**
To support NodePort type services, ipvs reuses the existing implementation from the iptables proxier. For example,
```shell
# kubectl describe svc nginx-service
Name: nginx-service
...
Type: NodePort
IP: 10.101.28.148
Port: http 3080/TCP
NodePort: http 31604/TCP
Endpoints: 172.17.0.2:80
Session Affinity: None
# iptables -t nat -nL
[root@100-106-179-225 ~]# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- !172.16.0.0/16 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 10.101.28.148 /* default/nginx-service:http cluster IP */ tcp dpt:3080
KUBE-NODEPORTS all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL
Chain KUBE-NODEPORTS (1 references)
target prot opt source destination
KUBE-MARK-MASQ tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
KUBE-SVC-6IM33IEVEEV7U3GP tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp dpt:31604
Chain KUBE-SVC-6IM33IEVEEV7U3GP (2 references)
target prot opt source destination
KUBE-SEP-Q3UCPZ54E6Q2R4UT all -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */
Chain KUBE-SEP-Q3UCPZ54E6Q2R4UT (1 references)
target prot opt source destination
KUBE-MARK-MASQ all -- 172.17.0.2 0.0.0.0/0 /* default/nginx-service:http */
DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-service:http */ tcp to:172.17.0.2:80
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (3 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ all -- !10.244.16.0/24 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-CLUSTER-IP dst,dst
```
**3. Load Balancer type service**
For LoadBalancer type services, the ipvs proxier will install iptables rules that match the ipset `KUBE-LOAD-BALANCER`.
In particular, when the service's `LoadBalancerSourceRanges` is specified or `externalTrafficPolicy=local` is set,
the ipvs proxier will create the ipset sets `KUBE-LOAD-BALANCER-LOCAL`/`KUBE-LOAD-BALANCER-FW`/`KUBE-LOAD-BALANCER-SOURCE-CIDR`
and install iptables rules accordingly, which should look like what is shown below.
```shell
# iptables -t nat -nL
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-FIREWALL (1 references)
target prot opt source destination
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-SOURCE-CIDR dst,dst,src
KUBE-MARK-DROP all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-LOAD-BALANCER (1 references)
target prot opt source destination
KUBE-FIREWALL all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-FW dst,dst
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER-LOCAL dst,dst
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-MARK-DROP (1 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x8000
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-LOAD-BALANCER all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOAD-BALANCER dst,dst
```
**4. NodePort type service**
For NodePort type services, the ipvs proxier will install iptables rules that match the ipsets `KUBE-NODE-PORT-TCP`/`KUBE-NODE-PORT-UDP`.
When `externalTrafficPolicy=local` is specified, the ipvs proxier will create the ipset sets `KUBE-NODE-PORT-LOCAL-TCP`/`KUBE-NODE-PORT-LOCAL-UDP`
and install iptables rules accordingly, which should look like what is shown below.
Suppose we have a service with a TCP nodePort.
```shell
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-NODE-PORT (1 references)
target prot opt source destination
RETURN all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-NODE-PORT-LOCAL-TCP dst
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-NODE-PORT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-NODE-PORT-TCP dst
```
**5. Service with externalIPs specified**
For services with `externalIPs` specified, the ipvs proxier will install iptables rules that match the ipset `KUBE-EXTERNAL-IP`.
Suppose we have a service with `externalIPs` specified; the iptables rules should then look like what is shown below.
```shell
Chain PREROUTING (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain OUTPUT (policy ACCEPT)
target prot opt source destination
KUBE-SERVICES all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */
Chain POSTROUTING (policy ACCEPT)
target prot opt source destination
KUBE-POSTROUTING all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */
Chain KUBE-MARK-MASQ (2 references)
target prot opt source destination
MARK all -- 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000
Chain KUBE-POSTROUTING (1 references)
target prot opt source destination
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ mark match 0x4000/0x4000
MASQUERADE all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-LOOP-BACK dst,dst,src
Chain KUBE-SERVICES (2 references)
target prot opt source destination
KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst PHYSDEV match ! --physdev-is-in ADDRTYPE match src-type !LOCAL
ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst ADDRTYPE match dst-type LOCAL
```
## Run kube-proxy in ipvs mode
Currently, local-up scripts, GCE scripts and kubeadm support switching IPVS proxy mode via exporting environment variables or specifying flags.
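When running kube-proxy directly, the mode is selected with command-line flags; a minimal sketch (the scheduler choice is illustrative):
```shell
kube-proxy --proxy-mode=ipvs --ipvs-scheduler=rr
```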
### Prerequisite
Ensure IPVS required kernel modules
@@ -248,13 +307,13 @@ lsmod | grep -e ipvs -e nf_conntrack_ipv4
cut -f1 -d " " /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4
```
Packages such as `ipset` should also be installed on the node before using IPVS mode.
Kube-proxy will fall back to IPTABLES mode if those requirements are not met.
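If the modules are missing, they can usually be loaded manually before starting kube-proxy. A hedged sketch; the scheduler modules (`ip_vs_rr`, `ip_vs_wrr`, `ip_vs_sh`) are assumptions and depend on which IPVS scheduler you use:
```shell
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
```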
### Local UP Cluster
Kube-proxy will run in iptables mode by default in a [local-up cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md).
To use IPVS mode, users should export the env `KUBE_PROXY_MODE=ipvs` to specify the ipvs mode before [starting the cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md#starting-the-cluster):
```shell
@@ -266,7 +325,7 @@ export KUBE_PROXY_MODE=ipvs
Similar to local-up cluster, kube-proxy in [clusters running on GCE](https://kubernetes.io/docs/getting-started-guides/gce/) run in iptables mode by default. Users need to export the env `KUBE_PROXY_MODE=ipvs` before [starting a cluster](https://kubernetes.io/docs/getting-started-guides/gce/#starting-a-cluster):
```shell
#before running one of the commands chosen to start a cluster:
# curl -sS https://get.k8s.io | bash
# wget -q -O - https://get.k8s.io | bash
# cluster/kube-up.sh
@@ -275,20 +334,19 @@ export KUBE_PROXY_MODE=ipvs
### Cluster Created by Kubeadm
Kube-proxy will run in iptables mode by default in a cluster deployed by [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/).
If you are using kubeadm with a [configuration file](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file), you can specify the ipvs mode by adding `SupportIPVSProxyMode: true` below the `kubeProxy` field.
If you are using kubeadm with a [configuration file](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file), you have to add `mode: ipvs` and also add `SupportIPVSProxyMode: true` below the `kubeProxy` field as part of the kubeadm configuration.
```yaml
kind: MasterConfiguration
apiVersion: kubeadm.k8s.io/v1alpha1
...
kubeProxy:
config:
featureGates: SupportIPVSProxyMode=true
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
...
```
Note that in Kubernetes 1.11 and later, `SupportIPVSProxyMode` is set to `true` by default.
before running
`kubeadm init --config <path_to_configuration_file>`
@@ -301,7 +359,7 @@ kubeadm init --feature-gates=SupportIPVSProxyMode=true
to specify the ipvs mode before deploying the cluster.
**Notes**
If ipvs mode is successfully enabled, you should see IPVS proxy rules (use `ipvsadm`) like
```shell
# ipvsadm -ln
@@ -316,7 +374,7 @@ or similar logs occur in kube-proxy logs (for example, `/tmp/kube-proxy.log` for
Using ipvs Proxier.
```
If there are no ipvs proxy rules, or the following logs occur, it indicates that kube-proxy failed to use ipvs mode:
```
Can't use ipvs proxier, trying iptables proxier
Using iptables Proxier.
@@ -352,7 +410,7 @@ UDP 10.0.0.10:53 rr
### Why kube-proxy can't start IPVS mode
Use the following checklist to help you solve the problems:
**1. Enable the IPVS feature gate**
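On Kubernetes versions before 1.11, the feature gate must be enabled explicitly when starting kube-proxy; a hedged sketch of the relevant flags (values illustrative):
```shell
kube-proxy --feature-gates=SupportIPVSProxyMode=true --proxy-mode=ipvs
```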

View File

@@ -0,0 +1,220 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ipvs
import (
"sync"
"time"
"fmt"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/wait"
utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
)
const (
rsGracefulDeletePeriod = 15 * time.Minute
rsCheckDeleteInterval = 1 * time.Minute
)
// listItem stores real server information and the process time.
// If nothing special happens, the real server will be deleted after the process time.
type listItem struct {
VirtualServer *utilipvs.VirtualServer
RealServer *utilipvs.RealServer
}
// String returns the unique real server name (with virtual server information).
func (g *listItem) String() string {
return GetUniqueRSName(g.VirtualServer, g.RealServer)
}
// GetUniqueRSName returns a unique rs name string that includes the vs information.
func GetUniqueRSName(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) string {
return vs.String() + "/" + rs.String()
}
type graceTerminateRSList struct {
lock sync.Mutex
list map[string]*listItem
}
// add pushes a new element onto the rsList.
func (q *graceTerminateRSList) add(rs *listItem) bool {
q.lock.Lock()
defer q.lock.Unlock()
uniqueRS := rs.String()
if _, ok := q.list[uniqueRS]; ok {
return false
}
glog.V(5).Infof("Adding rs %v to graceful delete rsList", rs)
q.list[uniqueRS] = rs
return true
}
// remove removes an element from the rsList.
func (q *graceTerminateRSList) remove(rs *listItem) bool {
q.lock.Lock()
defer q.lock.Unlock()
uniqueRS := rs.String()
if _, ok := q.list[uniqueRS]; ok {
delete(q.list, uniqueRS)
return true
}
return false
}
func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool {
success := true
for name, rs := range q.list {
deleted, err := handler(rs)
if err != nil {
glog.Errorf("Try delete rs %q err: %v", name, err)
success = false
}
if deleted {
glog.Infof("lw: remote out of the list: %s", name)
q.remove(rs)
}
}
return success
}
// exist checks whether the specified unique RS is in the rsList.
func (q *graceTerminateRSList) exist(uniqueRS string) (*listItem, bool) {
q.lock.Lock()
defer q.lock.Unlock()
if rs, ok := q.list[uniqueRS]; ok {
return rs, true
}
return nil, false
}
// GracefulTerminationManager manages rs graceful termination information and does the graceful termination work.
// rsList is the list of rs entries undergoing graceful termination; ipvs is the ipvs interface used to do the delete/update work.
type GracefulTerminationManager struct {
rsList graceTerminateRSList
ipvs utilipvs.Interface
}
// NewGracefulTerminationManager creates a GracefulTerminationManager to manage the ipvs rs graceful termination work.
func NewGracefulTerminationManager(ipvs utilipvs.Interface) *GracefulTerminationManager {
l := make(map[string]*listItem)
return &GracefulTerminationManager{
rsList: graceTerminateRSList{
list: l,
},
ipvs: ipvs,
}
}
// InTerminationList checks whether the specified unique rs name is in the graceful termination list.
func (m *GracefulTerminationManager) InTerminationList(uniqueRS string) bool {
_, exist := m.rsList.exist(uniqueRS)
return exist
}
// GracefulDeleteRS updates the rs weight to 0 and adds the rs to the graceful termination list.
func (m *GracefulTerminationManager) GracefulDeleteRS(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) error {
// Try to delete rs before add it to graceful delete list
ele := &listItem{
VirtualServer: vs,
RealServer: rs,
}
deleted, err := m.deleteRsFunc(ele)
if err != nil {
glog.Errorf("Delete rs %q err: %v", ele.String(), err)
}
if deleted {
return nil
}
rs.Weight = 0
err = m.ipvs.UpdateRealServer(vs, rs)
if err != nil {
return err
}
glog.V(5).Infof("Adding an element to graceful delete rsList: %+v", ele)
m.rsList.add(ele)
return nil
}
func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, error) {
glog.Infof("Trying to delete rs: %s", rsToDelete.String())
rss, err := m.ipvs.GetRealServers(rsToDelete.VirtualServer)
if err != nil {
return false, err
}
for _, rs := range rss {
if rsToDelete.RealServer.Equal(rs) {
if rs.ActiveConn != 0 {
return false, nil
}
glog.Infof("Deleting rs: %s", rsToDelete.String())
err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rs)
if err != nil {
return false, fmt.Errorf("Delete destination %q err: %v", rs.String(), err)
}
return true, nil
}
}
return true, fmt.Errorf("Failed to delete rs %q, can't find the real server", rsToDelete.String())
}
func (m *GracefulTerminationManager) tryDeleteRs() {
if !m.rsList.flushList(m.deleteRsFunc) {
glog.Errorf("Try flush graceful termination list err")
}
}
// MoveRSOutofGracefulDeleteList deletes an rs and removes it from the rsList immediately.
func (m *GracefulTerminationManager) MoveRSOutofGracefulDeleteList(uniqueRS string) error {
rsToDelete, find := m.rsList.exist(uniqueRS)
if !find || rsToDelete == nil {
return fmt.Errorf("failed to find rs: %q", uniqueRS)
}
err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rsToDelete.RealServer)
if err != nil {
return err
}
m.rsList.remove(rsToDelete)
return nil
}
// Run starts a goroutine that tries to delete the rs entries in the graceful delete rsList at a 1 minute interval.
func (m *GracefulTerminationManager) Run() {
// before starting, add any leftover real servers to the graceful delete rsList
vss, err := m.ipvs.GetVirtualServers()
if err != nil {
glog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver")
}
for _, vs := range vss {
rss, err := m.ipvs.GetRealServers(vs)
if err != nil {
glog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs)
continue
}
for _, rs := range rss {
m.GracefulDeleteRS(vs, rs)
}
}
go wait.Until(m.tryDeleteRs, rsCheckDeleteInterval, wait.NeverStop)
}
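As a rough illustration of what this manager does (output is made up; addresses and counters are illustrative), a real server being drained shows up in `ipvsadm` with its weight set to 0 until its active connections reach zero, at which point it is removed:
```shell
# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.101.28.148:3080 rr
  -> 172.17.0.2:80                Masq    1      3          0
  -> 172.17.0.3:80                Masq    0      1          0
```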

View File

@@ -64,6 +64,12 @@ const (
kubeNodePortLocalSetUDPComment = "Kubernetes nodeport UDP port with externalTrafficPolicy=local"
kubeNodePortLocalSetUDP = "KUBE-NODE-PORT-LOCAL-UDP"
kubeNodePortSetSCTPComment = "Kubernetes nodeport SCTP port for masquerade purpose"
kubeNodePortSetSCTP = "KUBE-NODE-PORT-SCTP"
kubeNodePortLocalSetSCTPComment = "Kubernetes nodeport SCTP port with externalTrafficPolicy=local"
kubeNodePortLocalSetSCTP = "KUBE-NODE-PORT-LOCAL-SCTP"
)
// IPSetVersioner can query the current ipset version.

View File

@@ -179,6 +179,26 @@ func TestSyncIPSetEntries(t *testing.T) {
currentEntries: []string{"80", "9090", "8081", "8082"},
expectedEntries: []string{"8080"},
},
{ // case 12
set: &utilipset.IPSet{
Name: "sctp-1",
},
setType: utilipset.HashIPPort,
ipv6: false,
activeEntries: []string{"172.17.0.4,sctp:80"},
currentEntries: nil,
expectedEntries: []string{"172.17.0.4,sctp:80"},
},
{ // case 13
set: &utilipset.IPSet{
Name: "sctp-2",
},
setType: utilipset.HashIPPort,
ipv6: true,
activeEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
currentEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
expectedEntries: []string{"FE80::0202:B3FF:FE1E:8329,sctp:80"},
},
}
for i := range testCases {

View File

@@ -30,7 +30,9 @@ type NetLinkHandle interface {
EnsureDummyDevice(devName string) (exist bool, err error)
// DeleteDummyDevice deletes the given dummy device by name.
DeleteDummyDevice(devName string) error
// GetLocalAddresses returns all unique local type IP addresses based on filter device interface. If filter device is not given,
// it will list all unique local type addresses.
GetLocalAddresses(filterDev string) (sets.String, error)
// ListBindAddress will list all IP addresses which are bound in a given interface
ListBindAddress(devName string) ([]string, error)
// GetLocalAddresses returns all unique local type IP addresses based on specified device and filter device
// If device is not specified, it will list all unique local type addresses except filter device addresses
GetLocalAddresses(dev, filterDev string) (sets.String, error)
}
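For intuition, the two address-listing methods in this interface roughly correspond to the following commands on a node (a hedged sketch; `kube-ipvs0` is the dummy device created by the IPVS proxier):
```shell
# ListBindAddress("kube-ipvs0"): addresses bound to the dummy device
ip addr show dev kube-ipvs0
# GetLocalAddresses("", "kube-ipvs0"): local kernel-proto addresses, excluding the dummy device
ip route show table local type local proto kernel
```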

View File

@@ -105,8 +105,25 @@ func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
return h.LinkDel(dummy)
}
// ListBindAddress will list all IP addresses which are bound in a given interface
func (h *netlinkHandle) ListBindAddress(devName string) ([]string, error) {
dev, err := h.LinkByName(devName)
if err != nil {
return nil, fmt.Errorf("error get interface: %s, err: %v", devName, err)
}
addrs, err := h.AddrList(dev, 0)
if err != nil {
return nil, fmt.Errorf("error list bound address of interface: %s, err: %v", devName, err)
}
ips := make([]string, 0)
for _, addr := range addrs {
ips = append(ips, addr.IP.String())
}
return ips, nil
}
// GetLocalAddresses lists all LOCAL type IP addresses from host based on filter device.
// If filter device is not specified, it's equivalent to exec:
// If dev is not specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel
// 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1
// 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10
@@ -119,20 +136,28 @@ func (h *netlinkHandle) DeleteDummyDevice(devName string) error {
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1]
// If filter device is specified, it's equivalent to exec:
// If dev is specified, it's equivalent to exec:
// $ ip route show table local type local proto kernel dev kube-ipvs0
// 10.0.0.1 scope host src 10.0.0.1
// 10.0.0.10 scope host src 10.0.0.10
// Then cut the unique src IP fields,
// --> result set: [10.0.0.1, 10.0.0.10]
func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
linkIndex := -1
if len(filterDev) != 0 {
// If filterDev is specified, the result will discard routes of the filter device and cut the src from the other routes.
func (h *netlinkHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
chosenLinkIndex, filterLinkIndex := -1, -1
if dev != "" {
link, err := h.LinkByName(dev)
if err != nil {
return nil, fmt.Errorf("error get device %s, err: %v", filterDev, err)
}
chosenLinkIndex = link.Attrs().Index
} else if filterDev != "" {
link, err := h.LinkByName(filterDev)
if err != nil {
return nil, fmt.Errorf("error get filter device %s, err: %v", filterDev, err)
}
linkIndex = link.Attrs().Index
filterLinkIndex = link.Attrs().Index
}
routeFilter := &netlink.Route{
@@ -142,18 +167,20 @@ func (h *netlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error)
}
filterMask := netlink.RT_FILTER_TABLE | netlink.RT_FILTER_TYPE | netlink.RT_FILTER_PROTOCOL
// find filter device
if linkIndex != -1 {
routeFilter.LinkIndex = linkIndex
// find chosen device
if chosenLinkIndex != -1 {
routeFilter.LinkIndex = chosenLinkIndex
filterMask |= netlink.RT_FILTER_OIF
}
routes, err := h.RouteListFiltered(netlink.FAMILY_ALL, routeFilter, filterMask)
if err != nil {
return nil, fmt.Errorf("error list route table, err: %v", err)
}
res := sets.NewString()
for _, route := range routes {
if route.LinkIndex == filterLinkIndex {
continue
}
if route.Src != nil {
res.Insert(route.Src.String())
}

View File

@@ -52,7 +52,12 @@ func (h *emptyHandle) DeleteDummyDevice(devName string) error {
return fmt.Errorf("netlink is not supported in this platform")
}
// GetLocalAddresses is part of interface.
func (h *emptyHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
// ListBindAddress is part of interface.
func (h *emptyHandle) ListBindAddress(devName string) ([]string, error) {
return nil, fmt.Errorf("netlink is not supported in this platform")
}
// GetLocalAddresses is part of interface.
func (h *emptyHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
return nil, fmt.Errorf("netlink is not supported in this platform")
}

View File

@@ -30,12 +30,11 @@ import (
"github.com/golang/glog"
clientv1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/record"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/proxy"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
"k8s.io/kubernetes/pkg/proxy/metrics"
@@ -129,26 +128,34 @@ var ipsetInfo = []struct {
{kubeNodePortLocalSetTCP, utilipset.BitmapPort, false, kubeNodePortLocalSetTCPComment},
{kubeNodePortSetUDP, utilipset.BitmapPort, false, kubeNodePortSetUDPComment},
{kubeNodePortLocalSetUDP, utilipset.BitmapPort, false, kubeNodePortLocalSetUDPComment},
{kubeNodePortSetSCTP, utilipset.BitmapPort, false, kubeNodePortSetSCTPComment},
{kubeNodePortLocalSetSCTP, utilipset.BitmapPort, false, kubeNodePortLocalSetSCTPComment},
}
// ipsetWithIptablesChain is the list of ipsets, together with the iptables source chain and the chain to jump to:
// `iptables -t nat -A <from> -m set --match-set <name> <matchType> -j <to>`
// example: iptables -t nat -A KUBE-SERVICES -m set --match-set KUBE-NODE-PORT-TCP dst -j KUBE-NODE-PORT
// ipsets with other match rules will be created individually.
// Note: kubeNodePortLocalSetTCP must be prior to kubeNodePortSetTCP, the same for UDP.
var ipsetWithIptablesChain = []struct {
name string
from string
to string
matchType string
name string
from string
to string
matchType string
protocolMatch string
}{
{kubeLoopBackIPSet, string(kubePostroutingChain), "MASQUERADE", "dst,dst,src"},
{kubeLoadBalancerSet, string(kubeServicesChain), string(KubeLoadBalancerChain), "dst,dst"},
{kubeLoadbalancerFWSet, string(KubeLoadBalancerChain), string(KubeFireWallChain), "dst,dst"},
{kubeLoadBalancerSourceCIDRSet, string(KubeFireWallChain), "RETURN", "dst,dst,src"},
{kubeLoadBalancerSourceIPSet, string(KubeFireWallChain), "RETURN", "dst,dst,src"},
{kubeLoadBalancerLocalSet, string(KubeLoadBalancerChain), "RETURN", "dst,dst"},
{kubeNodePortSetTCP, string(kubeServicesChain), string(KubeNodePortChain), "dst"},
{kubeNodePortLocalSetTCP, string(KubeNodePortChain), "RETURN", "dst"},
{kubeLoopBackIPSet, string(kubePostroutingChain), "MASQUERADE", "dst,dst,src", ""},
{kubeLoadBalancerSet, string(kubeServicesChain), string(KubeLoadBalancerChain), "dst,dst", ""},
{kubeLoadbalancerFWSet, string(KubeLoadBalancerChain), string(KubeFireWallChain), "dst,dst", ""},
{kubeLoadBalancerSourceCIDRSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
{kubeLoadBalancerSourceIPSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
{kubeLoadBalancerLocalSet, string(KubeLoadBalancerChain), "RETURN", "dst,dst", ""},
{kubeNodePortLocalSetTCP, string(KubeNodePortChain), "RETURN", "dst", "tcp"},
{kubeNodePortSetTCP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", "tcp"},
{kubeNodePortLocalSetUDP, string(KubeNodePortChain), "RETURN", "dst", "udp"},
{kubeNodePortSetUDP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", "udp"},
{kubeNodePortSetSCTP, string(kubeServicesChain), string(KubeNodePortChain), "dst", "sctp"},
{kubeNodePortLocalSetSCTP, string(KubeNodePortChain), "RETURN", "dst", "sctp"},
}
var ipvsModules = []string{
@@ -210,11 +217,12 @@ type Proxier struct {
ipGetter IPGetter
// The following buffers are used to reuse memory and avoid allocations
// that are significantly impacting performance.
iptablesData *bytes.Buffer
natChains *bytes.Buffer
filterChains *bytes.Buffer
natRules *bytes.Buffer
filterRules *bytes.Buffer
iptablesData *bytes.Buffer
filterChainsData *bytes.Buffer
natChains *bytes.Buffer
filterChains *bytes.Buffer
natRules *bytes.Buffer
filterRules *bytes.Buffer
// Added as a member to the struct to allow injection for testing.
netlinkHandle NetLinkHandle
// ipsetList is the list of ipsets that ipvs proxier used.
@@ -223,7 +231,8 @@ type Proxier struct {
nodePortAddresses []string
// networkInterfacer defines an interface for several net library functions.
// Inject for test purpose.
networkInterfacer utilproxy.NetworkInterfacer
networkInterfacer utilproxy.NetworkInterfacer
gracefuldeleteManager *GracefulTerminationManager
}
// IPGetter helps get node network interface IP
@@ -238,7 +247,8 @@ type realIPGetter struct {
}
// NodeIPs returns all LOCAL type IP addresses from host which are taken as the Node IPs of NodePort service.
// Firstly, it will list the source IPs that exist in the local route table with the `kernel` protocol type. For example,
// It will list the source IPs that exist in the local route table with the `kernel` protocol type, and filter out the
// dummy device `kube-ipvs0` created by the IPVS proxier. For example,
// $ ip route show table local type local proto kernel
// 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1
// 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10
@@ -248,35 +258,14 @@ type realIPGetter struct {
// 127.0.0.1 dev lo scope host src 127.0.0.1
// 172.17.0.1 dev docker0 scope host src 172.17.0.1
// 192.168.122.1 dev virbr0 scope host src 192.168.122.1
// Then cut the unique src IP fields,
// --> result set1: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1]
// NOTE: For cases where an LB acts as a VIP (e.g. Google cloud), the VIP IP is considered LOCAL, but the protocol
// of the entry is 66, e.g. `10.128.0.6 dev ens4 proto 66 scope host`. Therefore, the rule mentioned above will
// filter these entries out.
// Secondly, as we bind Cluster IPs to the dummy interface in IPVS proxier, we need to filter the them out so that
// we can eventually get the Node IPs. Fortunately, the dummy interface created by IPVS proxier is known as `kube-ipvs0`,
// so we just need to specify the `dev kube-ipvs0` argument in ip route command, for example,
// $ ip route show table local type local proto kernel dev kube-ipvs0
// 10.0.0.1 scope host src 10.0.0.1
// 10.0.0.10 scope host src 10.0.0.10
// Then cut the unique src IP fields,
// --> result set2: [10.0.0.1, 10.0.0.10]
// Finally, Node IP set = set1 - set2
// Then filter out dev==kube-ipvs0, and cut the unique src IP fields,
// Node IP set: [100.106.89.164, 127.0.0.1, 192.168.122.1]
func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) {
// Pass in empty filter device name for list all LOCAL type addresses.
allAddress, err := r.nl.GetLocalAddresses("")
nodeAddress, err := r.nl.GetLocalAddresses("", DefaultDummyDevice)
if err != nil {
return nil, fmt.Errorf("error listing LOCAL type addresses from host, error: %v", err)
}
dummyAddress, err := r.nl.GetLocalAddresses(DefaultDummyDevice)
if err != nil {
return nil, fmt.Errorf("error listing LOCAL type addresses from device: %s, error: %v", DefaultDummyDevice, err)
}
// exclude ip address from dummy interface created by IPVS proxier - they are all Cluster IPs.
nodeAddress := allAddress.Difference(dummyAddress)
// translate ip string to IP
for _, ipStr := range nodeAddress.UnsortedList() {
ips = append(ips, net.ParseIP(ipStr))
@@ -359,37 +348,39 @@ func NewProxier(ipt utiliptables.Interface,
healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
proxier := &Proxier{
portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable),
serviceMap: make(proxy.ServiceMap),
serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder),
endpointsMap: make(proxy.EndpointsMap),
endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder),
syncPeriod: syncPeriod,
minSyncPeriod: minSyncPeriod,
excludeCIDRs: excludeCIDRs,
iptables: ipt,
masqueradeAll: masqueradeAll,
masqueradeMark: masqueradeMark,
exec: exec,
clusterCIDR: clusterCIDR,
hostname: hostname,
nodeIP: nodeIP,
portMapper: &listenPortOpener{},
recorder: recorder,
healthChecker: healthChecker,
healthzServer: healthzServer,
ipvs: ipvs,
ipvsScheduler: scheduler,
ipGetter: &realIPGetter{nl: NewNetLinkHandle()},
iptablesData: bytes.NewBuffer(nil),
natChains: bytes.NewBuffer(nil),
natRules: bytes.NewBuffer(nil),
filterChains: bytes.NewBuffer(nil),
filterRules: bytes.NewBuffer(nil),
netlinkHandle: NewNetLinkHandle(),
ipset: ipset,
nodePortAddresses: nodePortAddresses,
networkInterfacer: utilproxy.RealNetwork{},
portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable),
serviceMap: make(proxy.ServiceMap),
serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder),
endpointsMap: make(proxy.EndpointsMap),
endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder),
syncPeriod: syncPeriod,
minSyncPeriod: minSyncPeriod,
excludeCIDRs: excludeCIDRs,
iptables: ipt,
masqueradeAll: masqueradeAll,
masqueradeMark: masqueradeMark,
exec: exec,
clusterCIDR: clusterCIDR,
hostname: hostname,
nodeIP: nodeIP,
portMapper: &listenPortOpener{},
recorder: recorder,
healthChecker: healthChecker,
healthzServer: healthzServer,
ipvs: ipvs,
ipvsScheduler: scheduler,
ipGetter: &realIPGetter{nl: NewNetLinkHandle()},
iptablesData: bytes.NewBuffer(nil),
filterChainsData: bytes.NewBuffer(nil),
natChains: bytes.NewBuffer(nil),
natRules: bytes.NewBuffer(nil),
filterChains: bytes.NewBuffer(nil),
filterRules: bytes.NewBuffer(nil),
netlinkHandle: NewNetLinkHandle(),
ipset: ipset,
nodePortAddresses: nodePortAddresses,
networkInterfacer: utilproxy.RealNetwork{},
gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
}
// initialize ipsetList with all sets we needed
proxier.ipsetList = make(map[string]*IPSet)
@@ -402,6 +393,7 @@ func NewProxier(ipt utiliptables.Interface,
burstSyncs := 2
glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
proxier.gracefuldeleteManager.Run()
return proxier, nil
}
@@ -413,7 +405,7 @@ type serviceInfo struct {
}
// returns a new proxy.ServicePort which abstracts a serviceInfo
func newServiceInfo(port *api.ServicePort, service *api.Service, baseInfo *proxy.BaseServiceInfo) proxy.ServicePort {
func newServiceInfo(port *v1.ServicePort, service *v1.Service, baseInfo *proxy.BaseServiceInfo) proxy.ServicePort {
info := &serviceInfo{BaseServiceInfo: baseInfo}
// Store the following for performance reasons.
@@ -548,9 +540,6 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool
// CleanupLeftovers clean up all ipvs and iptables rules created by ipvs Proxier.
func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset utilipset.Interface, cleanupIPVS bool) (encounteredError bool) {
if canUse, _ := CanUseIPVSProxier(NewLinuxKernelHandler(), ipset); !canUse {
return false
}
if cleanupIPVS {
// Return immediately when the ipvs interface is nil - probably initialization failed somewhere.
if ipvs == nil {
@@ -578,7 +567,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
err = ipset.DestroySet(set.name)
if err != nil {
if !utilipset.IsNotFoundError(err) {
glog.Errorf("Error removing ipset %s, error: %v", set, err)
glog.Errorf("Error removing ipset %s, error: %v", set.name, err)
encounteredError = true
}
}
@@ -613,19 +602,19 @@ func (proxier *Proxier) isInitialized() bool {
}
// OnServiceAdd is called whenever creation of new service object is observed.
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
proxier.OnServiceUpdate(nil, service)
}
// OnServiceUpdate is called whenever modification of an existing service object is observed.
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
// OnServiceDelete is called whenever deletion of an existing service object is observed.
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
proxier.OnServiceUpdate(service, nil)
}
@@ -641,19 +630,19 @@ func (proxier *Proxier) OnServiceSynced() {
}
// OnEndpointsAdd is called whenever creation of new endpoints object is observed.
func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) {
proxier.OnEndpointsUpdate(nil, endpoints)
}
// OnEndpointsUpdate is called whenever modification of an existing endpoints object is observed.
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
if proxier.endpointsChanges.Update(oldEndpoints, endpoints) && proxier.isInitialized() {
proxier.syncRunner.Run()
}
}
// OnEndpointsDelete is called whenever deletion of an existing endpoints object is observed.
func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {
func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) {
proxier.OnEndpointsUpdate(endpoints, nil)
}
@@ -697,7 +686,7 @@ func (proxier *Proxier) syncProxyRules() {
staleServices := serviceUpdateResult.UDPStaleClusterIP
// merge stale services gathered from updateEndpointsMap
for _, svcPortName := range endpointUpdateResult.StaleServiceNames {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == api.ProtocolUDP {
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == v1.ProtocolUDP {
glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString())
staleServices.Insert(svcInfo.ClusterIPString())
}
@@ -711,6 +700,8 @@ func (proxier *Proxier) syncProxyRules() {
// This is to avoid memory reallocations and thus improve performance.
proxier.natChains.Reset()
proxier.natRules.Reset()
proxier.filterChains.Reset()
proxier.filterRules.Reset()
// Write table headers.
writeLine(proxier.filterChains, "*filter")
@@ -739,6 +730,8 @@ func (proxier *Proxier) syncProxyRules() {
activeIPVSServices := map[string]bool{}
// currentIPVSServices represent IPVS services listed from the system
currentIPVSServices := make(map[string]*utilipvs.VirtualServer)
// activeBindAddrs represents the IP addresses successfully bound to DefaultDummyDevice in this round of sync
activeBindAddrs := map[string]bool{}
// Build IPVS rules for each service.
for svcName, svc := range proxier.serviceMap {
@@ -789,14 +782,11 @@ func (proxier *Proxier) syncProxyRules() {
}
// add service Cluster IP:Port to kubeServiceAccess ip set for the purpose of solving hairpin.
// proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String())
// Install masquerade rules if 'masqueradeAll' or 'clusterCIDR' is specified.
if proxier.masqueradeAll || len(proxier.clusterCIDR) > 0 {
if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid {
glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name))
continue
}
proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String())
if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid {
glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name))
continue
}
proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String())
// ipvs call
serv := &utilipvs.VirtualServer{
Address: svcInfo.ClusterIP,
@@ -805,13 +795,14 @@ func (proxier *Proxier) syncProxyRules() {
Scheduler: proxier.ipvsScheduler,
}
// Set session affinity flag and timeout for IPVS service
if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
serv.Flags |= utilipvs.FlagPersistent
serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
}
// We need to bind ClusterIP to dummy interface, so set `bindAddr` parameter to `true` in syncService()
if err := proxier.syncService(svcNameString, serv, true); err == nil {
activeIPVSServices[serv.String()] = true
activeBindAddrs[serv.Address.String()] = true
// ExternalTrafficPolicy only works for NodePort and external LB traffic, does not affect ClusterIP
// So we still need clusterIP rules in onlyNodeLocalEndpoints mode.
if err := proxier.syncEndpoint(svcName, false, serv); err != nil {
@@ -825,7 +816,9 @@ func (proxier *Proxier) syncProxyRules() {
for _, externalIP := range svcInfo.ExternalIPs {
if local, err := utilproxy.IsLocalIP(externalIP); err != nil {
glog.Errorf("can't determine if IP is local, assuming not: %v", err)
} else if local {
// We do not start listening on SCTP ports, according to our agreement in the
// SCTP support KEP
} else if local && (svcInfo.GetProtocol() != v1.ProtocolSCTP) {
lp := utilproxy.LocalPort{
Description: "externalIP for " + svcNameString,
IP: externalIP,
@@ -841,12 +834,12 @@ func (proxier *Proxier) syncProxyRules() {
msg := fmt.Sprintf("can't open %s, skipping this externalIP: %v", lp.String(), err)
proxier.recorder.Eventf(
&clientv1.ObjectReference{
&v1.ObjectReference{
Kind: "Node",
Name: proxier.hostname,
UID: types.UID(proxier.hostname),
Namespace: "",
}, api.EventTypeWarning, err.Error(), msg)
}, v1.EventTypeWarning, err.Error(), msg)
glog.Error(msg)
continue
}
@@ -875,12 +868,13 @@ func (proxier *Proxier) syncProxyRules() {
Protocol: string(svcInfo.Protocol),
Scheduler: proxier.ipvsScheduler,
}
if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
serv.Flags |= utilipvs.FlagPersistent
serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
}
if err := proxier.syncService(svcNameString, serv, true); err == nil {
activeIPVSServices[serv.String()] = true
activeBindAddrs[serv.Address.String()] = true
if err := proxier.syncEndpoint(svcName, false, serv); err != nil {
glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err)
}
@@ -975,14 +969,15 @@ func (proxier *Proxier) syncProxyRules() {
Protocol: string(svcInfo.Protocol),
Scheduler: proxier.ipvsScheduler,
}
if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
serv.Flags |= utilipvs.FlagPersistent
serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
}
if err := proxier.syncService(svcNameString, serv, true); err == nil {
// check if the service needs to skip endpoints that are not on the same host as kube-proxy
onlyLocal := svcInfo.SessionAffinityType == api.ServiceAffinityClientIP && svcInfo.OnlyNodeLocalEndpoints
onlyLocal := svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP && svcInfo.OnlyNodeLocalEndpoints
activeIPVSServices[serv.String()] = true
activeBindAddrs[serv.Address.String()] = true
if err := proxier.syncEndpoint(svcName, onlyLocal, serv); err != nil {
glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err)
}
@@ -1022,7 +1017,9 @@ func (proxier *Proxier) syncProxyRules() {
if proxier.portsMap[lp] != nil {
glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
replacementPortsMap[lp] = proxier.portsMap[lp]
} else {
// We do not start listening on SCTP ports, according to our agreement in the
// SCTP support KEP
} else if svcInfo.GetProtocol() != v1.ProtocolSCTP {
socket, err := proxier.portMapper.OpenLocalPort(&lp)
if err != nil {
glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
@@ -1030,7 +1027,7 @@ func (proxier *Proxier) syncProxyRules() {
}
if lp.Protocol == "udp" {
isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP)
conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, clientv1.ProtocolUDP)
conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP)
}
replacementPortsMap[lp] = socket
} // We're holding the port, so it's OK to install ipvs rules.
@@ -1050,6 +1047,8 @@ func (proxier *Proxier) syncProxyRules() {
nodePortSet = proxier.ipsetList[kubeNodePortSetTCP]
case "udp":
nodePortSet = proxier.ipsetList[kubeNodePortSetUDP]
case "sctp":
nodePortSet = proxier.ipsetList[kubeNodePortSetSCTP]
default:
// It should never hit
glog.Errorf("Unsupported protocol type: %s", protocol)
@@ -1070,6 +1069,8 @@ func (proxier *Proxier) syncProxyRules() {
nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetTCP]
case "udp":
nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetUDP]
case "sctp":
nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetSCTP]
default:
// This should never be hit
glog.Errorf("Unsupported protocol type: %s", protocol)
@@ -1104,14 +1105,14 @@ func (proxier *Proxier) syncProxyRules() {
Protocol: string(svcInfo.Protocol),
Scheduler: proxier.ipvsScheduler,
}
if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
serv.Flags |= utilipvs.FlagPersistent
serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
}
// There is no need to bind Node IP to dummy interface, so set parameter `bindAddr` to `false`.
if err := proxier.syncService(svcNameString, serv, false); err == nil {
activeIPVSServices[serv.String()] = true
if err := proxier.syncEndpoint(svcName, false, serv); err != nil {
if err := proxier.syncEndpoint(svcName, svcInfo.OnlyNodeLocalEndpoints, serv); err != nil {
glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err)
}
} else {
@@ -1135,6 +1136,8 @@ func (proxier *Proxier) syncProxyRules() {
proxier.iptablesData.Reset()
proxier.iptablesData.Write(proxier.natChains.Bytes())
proxier.iptablesData.Write(proxier.natRules.Bytes())
proxier.iptablesData.Write(proxier.filterChains.Bytes())
proxier.iptablesData.Write(proxier.filterRules.Bytes())
glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes())
err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
@@ -1164,6 +1167,14 @@ func (proxier *Proxier) syncProxyRules() {
}
proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices)
// Clean up legacy bind address
// currentBindAddrs represents the IP addresses currently bound to DefaultDummyDevice on the system
currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
if err != nil {
glog.Errorf("Failed to get bind address, err: %v", err)
}
proxier.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs)
// Update healthz timestamp
if proxier.healthzServer != nil {
proxier.healthzServer.UpdateTimestamp()
@@ -1182,7 +1193,7 @@ func (proxier *Proxier) syncProxyRules() {
// Finish housekeeping.
// TODO: these could be made more consistent.
for _, svcIP := range staleServices.UnsortedList() {
if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, clientv1.ProtocolUDP); err != nil {
if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil {
glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
}
}
@@ -1207,8 +1218,11 @@ func (proxier *Proxier) writeIptablesRules() {
for _, set := range ipsetWithIptablesChain {
if _, find := proxier.ipsetList[set.name]; find && !proxier.ipsetList[set.name].isEmpty() {
args = append(args[:0],
"-A", set.from,
args = append(args[:0], "-A", set.from)
if set.protocolMatch != "" {
args = append(args, "-p", set.protocolMatch)
}
args = append(args,
"-m", "comment", "--comment", proxier.ipsetList[set.name].getComment(),
"-m", "set", "--match-set", set.name,
set.matchType,
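A sketch of what an ipsetWithIptablesChain entry presumably carries after this change; the field names are taken from the accesses above, the struct layout itself is inferred, and the jump target written after this fragment is left out:
// Inferred fields of an ipsetWithIptablesChain entry. protocolMatch is the new
// piece: when non-empty it is emitted as "-p <protocol>", so e.g. an SCTP
// node-port set only matches SCTP packets instead of all traffic in the chain.
type ipsetChainEntry struct {
	name          string // ipset name, also used as the "--match-set" argument
	from          string // chain the generated rule is appended to
	matchType     string // ipset match type, e.g. "dst" or "dst,dst"
	protocolMatch string // optional protocol for "-p"; empty means no protocol match
}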
@@ -1267,39 +1281,12 @@ func (proxier *Proxier) writeIptablesRules() {
writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", "ACCEPT")...)
}
if !proxier.ipsetList[kubeNodePortSetUDP].isEmpty() {
// accept for nodeports w/ externaltrafficpolicy=local
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "udp", "-p", "udp",
"-m", "comment", "--comment", proxier.ipsetList[kubeNodePortSetUDP].getComment(),
"-m", "set", "--match-set", kubeNodePortSetUDP,
"dst",
)
writeLine(proxier.natRules, append(args, "-j", string(KubeNodePortChain))...)
if !proxier.ipsetList[kubeNodePortLocalSetUDP].isEmpty() {
args = append(args[:0],
"-A", string(KubeNodePortChain),
"-m", "comment", "--comment", proxier.ipsetList[kubeNodePortLocalSetUDP].getComment(),
"-m", "set", "--match-set", kubeNodePortLocalSetUDP,
"dst",
)
writeLine(proxier.natRules, append(args, "-j", "ACCEPT")...)
}
// mark masq for others
args = append(args[:0],
"-A", string(KubeNodePortChain),
"-m", "comment", "--comment",
fmt.Sprintf(`"mark MASQ for externaltrafficpolicy=cluster"`),
)
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
}
// mark masq for KUBE-NODE-PORT
writeLine(proxier.natRules, []string{
"-A", string(KubeNodePortChain),
"-j", string(KubeMarkMasqChain),
}...)
// -A KUBE-SERVICES -m addrtype --dst-type LOCAL -j KUBE-NODE-PORT
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "addrtype", "--dst-type", "LOCAL",
)
writeLine(proxier.natRules, append(args, "-j", string(KubeNodePortChain))...)
// mark drop for KUBE-LOAD-BALANCER
writeLine(proxier.natRules, []string{
@@ -1379,8 +1366,8 @@ func (proxier *Proxier) acceptIPVSTraffic() {
// createAndLinkeKubeChain creates all kube chains that the ipvs proxier needs and writes the basic links.
func (proxier *Proxier) createAndLinkeKubeChain() {
existingFilterChains := proxier.getExistingChains(utiliptables.TableFilter)
existingNATChains := proxier.getExistingChains(utiliptables.TableNAT)
existingFilterChains := proxier.getExistingChains(proxier.filterChainsData, utiliptables.TableFilter)
existingNATChains := proxier.getExistingChains(proxier.iptablesData, utiliptables.TableNAT)
// Make sure we keep stats for the top-level chains
for _, ch := range iptablesChains {
@@ -1390,13 +1377,13 @@ func (proxier *Proxier) createAndLinkeKubeChain() {
}
if ch.table == utiliptables.TableNAT {
if chain, ok := existingNATChains[ch.chain]; ok {
writeLine(proxier.natChains, chain)
writeBytesLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(kubePostroutingChain))
}
} else {
if chain, ok := existingFilterChains[KubeForwardChain]; ok {
writeLine(proxier.filterChains, chain)
writeBytesLine(proxier.filterChains, chain)
} else {
writeLine(proxier.filterChains, utiliptables.MakeChainLine(KubeForwardChain))
}
@@ -1431,13 +1418,14 @@ func (proxier *Proxier) createAndLinkeKubeChain() {
// getExistingChains gets the iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
func (proxier *Proxier) getExistingChains(table utiliptables.Table) map[utiliptables.Chain]string {
proxier.iptablesData.Reset()
err := proxier.iptables.SaveInto(table, proxier.iptablesData)
// Result may SHARE memory with contents of buffer.
func (proxier *Proxier) getExistingChains(buffer *bytes.Buffer, table utiliptables.Table) map[utiliptables.Chain][]byte {
buffer.Reset()
err := proxier.iptables.SaveInto(table, buffer)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
return utiliptables.GetChainLines(table, proxier.iptablesData.Bytes())
return utiliptables.GetChainLines(table, buffer.Bytes())
}
return nil
}
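Since getExistingChains now returns []byte slices that alias the caller-supplied buffer rather than fresh strings, the caller owns their lifetime; a small sketch of the constraint, assuming utiliptables.GetChainLines keeps returning sub-slices of its input:
// The returned chain lines share memory with the buffer, so they are only
// valid until that buffer is next Reset(); copy them if they must outlive it.
chains := proxier.getExistingChains(proxier.filterChainsData, utiliptables.TableFilter)
saved := make(map[utiliptables.Chain][]byte, len(chains))
for name, line := range chains {
	saved[name] = append([]byte(nil), line...) // defensive copy, only needed if kept across syncs
}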
@@ -1447,9 +1435,9 @@ func (proxier *Proxier) getExistingChains(table utiliptables.Table) map[utilipta
// This assumes the proxier mutex is held
func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceEndpoint) {
for _, epSvcPair := range connectionMap {
if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.GetProtocol() == api.ProtocolUDP {
if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.GetProtocol() == v1.ProtocolUDP {
endpointIP := utilproxy.IPPart(epSvcPair.Endpoint)
err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, clientv1.ProtocolUDP)
err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP)
if err != nil {
glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err)
}
@@ -1481,6 +1469,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer,
// bind the service address to the dummy interface even if the service has not changed,
// in case the service IP was removed by other processes
if bindAddr {
glog.V(4).Infof("Bind addr %s", vs.Address.String())
_, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), DefaultDummyDevice)
if err != nil {
glog.Errorf("Failed to bind service address to dummy device %q: %v", svcName, err)
@@ -1518,66 +1507,93 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
newEndpoints.Insert(epInfo.String())
}
if !curEndpoints.Equal(newEndpoints) {
// Create new endpoints
for _, ep := range newEndpoints.Difference(curEndpoints).UnsortedList() {
ip, port, err := net.SplitHostPort(ep)
if err != nil {
glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
continue
}
portNum, err := strconv.Atoi(port)
if err != nil {
glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
continue
}
// Create new endpoints
for _, ep := range newEndpoints.List() {
ip, port, err := net.SplitHostPort(ep)
if err != nil {
glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
continue
}
portNum, err := strconv.Atoi(port)
if err != nil {
glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
continue
}
newDest := &utilipvs.RealServer{
Address: net.ParseIP(ip),
Port: uint16(portNum),
Weight: 1,
newDest := &utilipvs.RealServer{
Address: net.ParseIP(ip),
Port: uint16(portNum),
Weight: 1,
}
if curEndpoints.Has(ep) {
// check if newEndpoint is in the gracefulDelete list; if so, delete this ep immediately
uniqueRS := GetUniqueRSName(vs, newDest)
if !proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
continue
}
err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest)
glog.V(5).Infof("new ep %q is in graceful delete list", uniqueRS)
err := proxier.gracefuldeleteManager.MoveRSOutofGracefulDeleteList(uniqueRS)
if err != nil {
glog.Errorf("Failed to add destination: %v, error: %v", newDest, err)
glog.Errorf("Failed to delete endpoint: %v in gracefulDeleteQueue, error: %v", ep, err)
continue
}
}
// Delete old endpoints
for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() {
ip, port, err := net.SplitHostPort(ep)
if err != nil {
glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
continue
}
portNum, err := strconv.Atoi(port)
if err != nil {
glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
continue
}
err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest)
if err != nil {
glog.Errorf("Failed to add destination: %v, error: %v", newDest, err)
continue
}
}
// Delete old endpoints
for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() {
// if curEndpoint is in gracefulDelete, skip
uniqueRS := vs.String() + "/" + ep
if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
continue
}
ip, port, err := net.SplitHostPort(ep)
if err != nil {
glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
continue
}
portNum, err := strconv.Atoi(port)
if err != nil {
glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
continue
}
delDest := &utilipvs.RealServer{
Address: net.ParseIP(ip),
Port: uint16(portNum),
}
err = proxier.ipvs.DeleteRealServer(appliedVirtualServer, delDest)
if err != nil {
glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err)
continue
}
delDest := &utilipvs.RealServer{
Address: net.ParseIP(ip),
Port: uint16(portNum),
}
glog.V(5).Infof("Using graceful delete to delete: %v", delDest)
err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest)
if err != nil {
glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err)
continue
}
}
return nil
}
func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer) {
unbindIPAddr := sets.NewString()
for cs := range currentServices {
svc := currentServices[cs]
if _, ok := activeServices[cs]; !ok {
// This service was not processed in the latest sync loop so before deleting it,
// make sure it does not fall within an excluded CIDR range.
okayToDelete := true
rsList, _ := proxier.ipvs.GetRealServers(svc)
for _, rs := range rsList {
uniqueRS := GetUniqueRSName(svc, rs)
// if there are real servers in this service that are still terminating, handle it later
if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
okayToDelete = false
break
}
}
for _, excludedCIDR := range proxier.excludeCIDRs {
// Any validation of this CIDR already should have occurred.
_, n, _ := net.ParseCIDR(excludedCIDR)
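syncEndpoint and cleanLegacyService above both lean on proxier.gracefuldeleteManager; its definition is not part of this hunk, but from the call sites its surface presumably looks roughly like the sketch below (an inference, not the actual declaration):
// Inferred surface of the graceful-delete manager: instead of removing a real
// server outright, GracefulDeleteRS parks it in a termination list so existing
// connections can drain; InTerminationList reports whether a real server
// (keyed by GetUniqueRSName) is still draining; MoveRSOutofGracefulDeleteList
// drops it from the list immediately, e.g. when the endpoint reappears.
type gracefulTerminationManager interface {
	InTerminationList(uniqueRS string) bool
	GracefulDeleteRS(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) error
	MoveRSOutofGracefulDeleteList(uniqueRS string) error
}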
@@ -1590,16 +1606,21 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre
if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil {
glog.Errorf("Failed to delete service, error: %v", err)
}
unbindIPAddr.Insert(svc.Address.String())
}
}
}
}
for _, addr := range unbindIPAddr.UnsortedList() {
err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice)
// Ignore "no such address" errors when trying to unbind the address
if err != nil {
glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
func (proxier *Proxier) cleanLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) {
for _, addr := range currentBindAddrs {
if _, ok := activeBindAddrs[addr]; !ok {
// This address was not processed in the latest sync loop
glog.V(4).Infof("Unbind addr %s", addr)
err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice)
// Ignore "no such address" errors when trying to unbind the address
if err != nil {
glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
}
}
}
}
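cleanLegacyBindAddr pairs with the ListBindAddress call added to the sync loop earlier in this diff; together the call sites imply roughly this fragment of the netlink handle (method shapes are inferred from usage, not quoted from the interface):
// Inferred fragment of the netlink handle used for dummy-device bookkeeping:
// syncService binds each active VIP to DefaultDummyDevice, the sync loop lists
// what is currently bound, and cleanLegacyBindAddr unbinds anything stale.
type netlinkAddressHandle interface {
	// EnsureAddressBind binds address to devName; the first return value is
	// presumably whether it was already bound (it is discarded above).
	EnsureAddressBind(address, devName string) (bool, error)
	// ListBindAddress returns every address currently bound to devName.
	ListBindAddress(devName string) ([]string, error)
	// UnbindAddress removes address from devName.
	UnbindAddress(address, devName string) error
}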
@@ -1617,6 +1638,11 @@ func writeLine(buf *bytes.Buffer, words ...string) {
}
}
func writeBytesLine(buf *bytes.Buffer, bytes []byte) {
buf.Write(bytes)
buf.WriteByte('\n')
}
// listenPortOpener opens ports by calling bind() and listen().
type listenPortOpener struct{}

File diff suppressed because it is too large

View File

@@ -18,7 +18,7 @@ go_library(
tags = ["automanaged"],
deps = [
"//pkg/util/ipset:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
@@ -39,5 +39,5 @@ go_test(
name = "go_default_test",
srcs = ["fake_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"],
deps = ["//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library"],
)

View File

@@ -57,18 +57,26 @@ func (h *FakeNetlinkHandle) DeleteDummyDevice(devName string) error {
return nil
}
// ListBindAddress is a mock implementation
func (h *FakeNetlinkHandle) ListBindAddress(devName string) ([]string, error) {
return nil, nil
}
// GetLocalAddresses is a mock implementation
func (h *FakeNetlinkHandle) GetLocalAddresses(filterDev string) (sets.String, error) {
func (h *FakeNetlinkHandle) GetLocalAddresses(dev, filterDev string) (sets.String, error) {
res := sets.NewString()
if len(filterDev) != 0 {
if len(dev) != 0 {
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[filterDev] {
for _, addr := range h.localAddresses[dev] {
res.Insert(addr)
}
return res, nil
}
// If dev is not given, list all addresses from all available network interfaces, excluding filterDev.
for linkName := range h.localAddresses {
if linkName == filterDev {
continue
}
// list all addresses from a given network interface.
for _, addr := range h.localAddresses[linkName] {
res.Insert(addr)

View File

@@ -27,21 +27,21 @@ func TestSetGetLocalAddresses(t *testing.T) {
fake := NewFakeNetlinkHandle()
fake.SetLocalAddresses("eth0", "1.2.3.4")
expected := sets.NewString("1.2.3.4")
addr, _ := fake.GetLocalAddresses("eth0")
addr, _ := fake.GetLocalAddresses("eth0", "")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ := fake.GetLocalAddresses("")
list, _ := fake.GetLocalAddresses("", "")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)
}
fake.SetLocalAddresses("lo", "127.0.0.1")
expected = sets.NewString("127.0.0.1")
addr, _ = fake.GetLocalAddresses("lo")
addr, _ = fake.GetLocalAddresses("lo", "")
if !reflect.DeepEqual(expected, addr) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, addr)
}
list, _ = fake.GetLocalAddresses("")
list, _ = fake.GetLocalAddresses("", "")
expected = sets.NewString("1.2.3.4", "127.0.0.1")
if !reflect.DeepEqual(expected, list) {
t.Errorf("Unexpected mismatch, expected: %v, got: %v", expected, list)