Bumping k8s dependencies to 1.13
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go (generated, vendored; 424 lines changed)
@@ -30,12 +30,11 @@ import (
 	"github.com/golang/glog"
-	clientv1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
-	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/proxy"
 	"k8s.io/kubernetes/pkg/proxy/healthcheck"
 	"k8s.io/kubernetes/pkg/proxy/metrics"
@@ -129,26 +128,34 @@ var ipsetInfo = []struct {
 	{kubeNodePortLocalSetTCP, utilipset.BitmapPort, false, kubeNodePortLocalSetTCPComment},
 	{kubeNodePortSetUDP, utilipset.BitmapPort, false, kubeNodePortSetUDPComment},
 	{kubeNodePortLocalSetUDP, utilipset.BitmapPort, false, kubeNodePortLocalSetUDPComment},
+	{kubeNodePortSetSCTP, utilipset.BitmapPort, false, kubeNodePortSetSCTPComment},
+	{kubeNodePortLocalSetSCTP, utilipset.BitmapPort, false, kubeNodePortLocalSetSCTPComment},
 }

 // ipsetWithIptablesChain is the ipsets list with iptables source chain and the chain jump to
 // `iptables -t nat -A <from> -m set --match-set <name> <matchType> -j <to>`
 // example: iptables -t nat -A KUBE-SERVICES -m set --match-set KUBE-NODE-PORT-TCP dst -j KUBE-NODE-PORT
 // ipsets with other match rules will be created Individually.
+// Note: kubeNodePortLocalSetTCP must be prior to kubeNodePortSetTCP, the same for UDP.
 var ipsetWithIptablesChain = []struct {
-	name      string
-	from      string
-	to        string
-	matchType string
+	name          string
+	from          string
+	to            string
+	matchType     string
+	protocolMatch string
 }{
-	{kubeLoopBackIPSet, string(kubePostroutingChain), "MASQUERADE", "dst,dst,src"},
-	{kubeLoadBalancerSet, string(kubeServicesChain), string(KubeLoadBalancerChain), "dst,dst"},
-	{kubeLoadbalancerFWSet, string(KubeLoadBalancerChain), string(KubeFireWallChain), "dst,dst"},
-	{kubeLoadBalancerSourceCIDRSet, string(KubeFireWallChain), "RETURN", "dst,dst,src"},
-	{kubeLoadBalancerSourceIPSet, string(KubeFireWallChain), "RETURN", "dst,dst,src"},
-	{kubeLoadBalancerLocalSet, string(KubeLoadBalancerChain), "RETURN", "dst,dst"},
-	{kubeNodePortSetTCP, string(kubeServicesChain), string(KubeNodePortChain), "dst"},
-	{kubeNodePortLocalSetTCP, string(KubeNodePortChain), "RETURN", "dst"},
+	{kubeLoopBackIPSet, string(kubePostroutingChain), "MASQUERADE", "dst,dst,src", ""},
+	{kubeLoadBalancerSet, string(kubeServicesChain), string(KubeLoadBalancerChain), "dst,dst", ""},
+	{kubeLoadbalancerFWSet, string(KubeLoadBalancerChain), string(KubeFireWallChain), "dst,dst", ""},
+	{kubeLoadBalancerSourceCIDRSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
+	{kubeLoadBalancerSourceIPSet, string(KubeFireWallChain), "RETURN", "dst,dst,src", ""},
+	{kubeLoadBalancerLocalSet, string(KubeLoadBalancerChain), "RETURN", "dst,dst", ""},
+	{kubeNodePortLocalSetTCP, string(KubeNodePortChain), "RETURN", "dst", "tcp"},
+	{kubeNodePortSetTCP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", "tcp"},
+	{kubeNodePortLocalSetUDP, string(KubeNodePortChain), "RETURN", "dst", "udp"},
+	{kubeNodePortSetUDP, string(KubeNodePortChain), string(KubeMarkMasqChain), "dst", "udp"},
+	{kubeNodePortSetSCTP, string(kubeServicesChain), string(KubeNodePortChain), "dst", "sctp"},
+	{kubeNodePortLocalSetSCTP, string(KubeNodePortChain), "RETURN", "dst", "sctp"},
 }

 var ipvsModules = []string{
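The comment above gives the rule template each table row expands to; the new protocolMatch column threads a `-p <protocol>` match into it so the TCP, UDP and SCTP node-port sets can share one table. A minimal, self-contained Go sketch of that expansion (illustrative only — the vendored writeIptablesRules further down does the real rendering, with comments and emptiness checks; the concrete set name below is a hypothetical stand-in for kubeNodePortLocalSetTCP):

    package main

    import (
        "fmt"
        "strings"
    )

    // One row of the ipsetWithIptablesChain table above.
    type ipsetChainRow struct {
        name, from, to, matchType, protocolMatch string
    }

    // render expands a row into the template from the comment:
    // iptables -t nat -A <from> [-p <proto>] -m set --match-set <name> <matchType> -j <to>
    func render(r ipsetChainRow) string {
        args := []string{"-t", "nat", "-A", r.from}
        if r.protocolMatch != "" {
            args = append(args, "-p", r.protocolMatch)
        }
        args = append(args, "-m", "set", "--match-set", r.name, r.matchType, "-j", r.to)
        return "iptables " + strings.Join(args, " ")
    }

    func main() {
        row := ipsetChainRow{"KUBE-NODE-PORT-LOCAL-TCP", "KUBE-NODE-PORT", "RETURN", "dst", "tcp"}
        fmt.Println(render(row))
    }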
@@ -210,11 +217,12 @@ type Proxier struct {
 	ipGetter IPGetter
 	// The following buffers are used to reuse memory and avoid allocations
 	// that are significantly impacting performance.
-	iptablesData *bytes.Buffer
-	natChains    *bytes.Buffer
-	filterChains *bytes.Buffer
-	natRules     *bytes.Buffer
-	filterRules  *bytes.Buffer
+	iptablesData     *bytes.Buffer
+	filterChainsData *bytes.Buffer
+	natChains        *bytes.Buffer
+	filterChains     *bytes.Buffer
+	natRules         *bytes.Buffer
+	filterRules      *bytes.Buffer
 	// Added as a member to the struct to allow injection for testing.
 	netlinkHandle NetLinkHandle
 	// ipsetList is the list of ipsets that ipvs proxier used.
@@ -223,7 +231,8 @@ type Proxier struct {
 	nodePortAddresses []string
 	// networkInterfacer defines an interface for several net library functions.
 	// Inject for test purpose.
-	networkInterfacer utilproxy.NetworkInterfacer
+	networkInterfacer     utilproxy.NetworkInterfacer
+	gracefuldeleteManager *GracefulTerminationManager
 }

 // IPGetter helps get node network interface IP
@@ -238,7 +247,8 @@ type realIPGetter struct {
 }

 // NodeIPs returns all LOCAL type IP addresses from host which are taken as the Node IPs of NodePort service.
-// Firstly, it will list source IP exists in local route table with `kernel` protocol type. For example,
+// It will list source IP exists in local route table with `kernel` protocol type, and filter out IPVS proxier
+// created dummy device `kube-ipvs0` For example,
 // $ ip route show table local type local proto kernel
 // 10.0.0.1 dev kube-ipvs0 scope host src 10.0.0.1
 // 10.0.0.10 dev kube-ipvs0 scope host src 10.0.0.10
@@ -248,35 +258,14 @@ type realIPGetter struct {
 // 127.0.0.1 dev lo scope host src 127.0.0.1
 // 172.17.0.1 dev docker0 scope host src 172.17.0.1
 // 192.168.122.1 dev virbr0 scope host src 192.168.122.1
-// Then cut the unique src IP fields,
-// --> result set1: [10.0.0.1, 10.0.0.10, 10.0.0.252, 100.106.89.164, 127.0.0.1, 192.168.122.1]
-
-// NOTE: For cases where an LB acts as a VIP (e.g. Google cloud), the VIP IP is considered LOCAL, but the protocol
-// of the entry is 66, e.g. `10.128.0.6 dev ens4 proto 66 scope host`. Therefore, the rule mentioned above will
-// filter these entries out.
-
-// Secondly, as we bind Cluster IPs to the dummy interface in IPVS proxier, we need to filter the them out so that
-// we can eventually get the Node IPs. Fortunately, the dummy interface created by IPVS proxier is known as `kube-ipvs0`,
-// so we just need to specify the `dev kube-ipvs0` argument in ip route command, for example,
-// $ ip route show table local type local proto kernel dev kube-ipvs0
-// 10.0.0.1 scope host src 10.0.0.1
-// 10.0.0.10 scope host src 10.0.0.10
-// Then cut the unique src IP fields,
-// --> result set2: [10.0.0.1, 10.0.0.10]
-
-// Finally, Node IP set = set1 - set2
+// Then filter out dev==kube-ipvs0, and cut the unique src IP fields,
+// Node IP set: [100.106.89.164, 127.0.0.1, 192.168.122.1]
 func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) {
 	// Pass in empty filter device name for list all LOCAL type addresses.
-	allAddress, err := r.nl.GetLocalAddresses("")
+	nodeAddress, err := r.nl.GetLocalAddresses("", DefaultDummyDevice)
 	if err != nil {
 		return nil, fmt.Errorf("error listing LOCAL type addresses from host, error: %v", err)
 	}
-	dummyAddress, err := r.nl.GetLocalAddresses(DefaultDummyDevice)
-	if err != nil {
-		return nil, fmt.Errorf("error listing LOCAL type addresses from device: %s, error: %v", DefaultDummyDevice, err)
-	}
-	// exclude ip address from dummy interface created by IPVS proxier - they are all Cluster IPs.
-	nodeAddress := allAddress.Difference(dummyAddress)
 	// translate ip string to IP
 	for _, ipStr := range nodeAddress.UnsortedList() {
 		ips = append(ips, net.ParseIP(ipStr))
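The old NodeIPs made two netlink queries and subtracted the dummy-device addresses in userspace; the new one asks GetLocalAddresses to exclude kube-ipvs0 in a single call. A self-contained sketch of the set arithmetic being replaced (plain maps stand in for the sets.String type used in the diff; the example IPs mirror the comment above):

    package main

    import "fmt"

    // difference returns the keys of a not present in b — the old
    // "Node IP set = set1 - set2" computation from the removed comment.
    func difference(a, b map[string]bool) []string {
        var out []string
        for k := range a {
            if !b[k] {
                out = append(out, k)
            }
        }
        return out
    }

    func main() {
        // Old approach: list all LOCAL addresses, list kube-ipvs0 addresses,
        // then subtract (two netlink round trips).
        all := map[string]bool{"10.0.0.1": true, "100.106.89.164": true, "127.0.0.1": true}
        dummy := map[string]bool{"10.0.0.1": true} // cluster IPs bound to kube-ipvs0
        fmt.Println(difference(all, dummy))

        // New approach: one call — GetLocalAddresses("", DefaultDummyDevice) —
        // asks netlink to filter the dummy device up front.
    }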
@@ -359,37 +348,39 @@ func NewProxier(ipt utiliptables.Interface,
 	healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps

 	proxier := &Proxier{
-		portsMap:          make(map[utilproxy.LocalPort]utilproxy.Closeable),
-		serviceMap:        make(proxy.ServiceMap),
-		serviceChanges:    proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder),
-		endpointsMap:      make(proxy.EndpointsMap),
-		endpointsChanges:  proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder),
-		syncPeriod:        syncPeriod,
-		minSyncPeriod:     minSyncPeriod,
-		excludeCIDRs:      excludeCIDRs,
-		iptables:          ipt,
-		masqueradeAll:     masqueradeAll,
-		masqueradeMark:    masqueradeMark,
-		exec:              exec,
-		clusterCIDR:       clusterCIDR,
-		hostname:          hostname,
-		nodeIP:            nodeIP,
-		portMapper:        &listenPortOpener{},
-		recorder:          recorder,
-		healthChecker:     healthChecker,
-		healthzServer:     healthzServer,
-		ipvs:              ipvs,
-		ipvsScheduler:     scheduler,
-		ipGetter:          &realIPGetter{nl: NewNetLinkHandle()},
-		iptablesData:      bytes.NewBuffer(nil),
-		natChains:         bytes.NewBuffer(nil),
-		natRules:          bytes.NewBuffer(nil),
-		filterChains:      bytes.NewBuffer(nil),
-		filterRules:       bytes.NewBuffer(nil),
-		netlinkHandle:     NewNetLinkHandle(),
-		ipset:             ipset,
-		nodePortAddresses: nodePortAddresses,
-		networkInterfacer: utilproxy.RealNetwork{},
+		portsMap:              make(map[utilproxy.LocalPort]utilproxy.Closeable),
+		serviceMap:            make(proxy.ServiceMap),
+		serviceChanges:        proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder),
+		endpointsMap:          make(proxy.EndpointsMap),
+		endpointsChanges:      proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder),
+		syncPeriod:            syncPeriod,
+		minSyncPeriod:         minSyncPeriod,
+		excludeCIDRs:          excludeCIDRs,
+		iptables:              ipt,
+		masqueradeAll:         masqueradeAll,
+		masqueradeMark:        masqueradeMark,
+		exec:                  exec,
+		clusterCIDR:           clusterCIDR,
+		hostname:              hostname,
+		nodeIP:                nodeIP,
+		portMapper:            &listenPortOpener{},
+		recorder:              recorder,
+		healthChecker:         healthChecker,
+		healthzServer:         healthzServer,
+		ipvs:                  ipvs,
+		ipvsScheduler:         scheduler,
+		ipGetter:              &realIPGetter{nl: NewNetLinkHandle()},
+		iptablesData:          bytes.NewBuffer(nil),
+		filterChainsData:      bytes.NewBuffer(nil),
+		natChains:             bytes.NewBuffer(nil),
+		natRules:              bytes.NewBuffer(nil),
+		filterChains:          bytes.NewBuffer(nil),
+		filterRules:           bytes.NewBuffer(nil),
+		netlinkHandle:         NewNetLinkHandle(),
+		ipset:                 ipset,
+		nodePortAddresses:     nodePortAddresses,
+		networkInterfacer:     utilproxy.RealNetwork{},
+		gracefuldeleteManager: NewGracefulTerminationManager(ipvs),
 	}
 	// initialize ipsetList with all sets we needed
 	proxier.ipsetList = make(map[string]*IPSet)
@@ -402,6 +393,7 @@ func NewProxier(ipt utiliptables.Interface,
 	burstSyncs := 2
 	glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
 	proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
+	proxier.gracefuldeleteManager.Run()
 	return proxier, nil
 }

@@ -413,7 +405,7 @@ type serviceInfo struct {
 }

 // returns a new proxy.ServicePort which abstracts a serviceInfo
-func newServiceInfo(port *api.ServicePort, service *api.Service, baseInfo *proxy.BaseServiceInfo) proxy.ServicePort {
+func newServiceInfo(port *v1.ServicePort, service *v1.Service, baseInfo *proxy.BaseServiceInfo) proxy.ServicePort {
 	info := &serviceInfo{BaseServiceInfo: baseInfo}

 	// Store the following for performance reasons.
@@ -548,9 +540,6 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool

 // CleanupLeftovers clean up all ipvs and iptables rules created by ipvs Proxier.
 func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset utilipset.Interface, cleanupIPVS bool) (encounteredError bool) {
-	if canUse, _ := CanUseIPVSProxier(NewLinuxKernelHandler(), ipset); !canUse {
-		return false
-	}
 	if cleanupIPVS {
 		// Return immediately when ipvs interface is nil - Probably initialization failed in somewhere.
 		if ipvs == nil {
@@ -578,7 +567,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset
 		err = ipset.DestroySet(set.name)
 		if err != nil {
 			if !utilipset.IsNotFoundError(err) {
-				glog.Errorf("Error removing ipset %s, error: %v", set, err)
+				glog.Errorf("Error removing ipset %s, error: %v", set.name, err)
 				encounteredError = true
 			}
 		}
@@ -613,19 +602,19 @@ func (proxier *Proxier) isInitialized() bool {
 }

 // OnServiceAdd is called whenever creation of new service object is observed.
-func (proxier *Proxier) OnServiceAdd(service *api.Service) {
+func (proxier *Proxier) OnServiceAdd(service *v1.Service) {
 	proxier.OnServiceUpdate(nil, service)
 }

 // OnServiceUpdate is called whenever modification of an existing service object is observed.
-func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
+func (proxier *Proxier) OnServiceUpdate(oldService, service *v1.Service) {
 	if proxier.serviceChanges.Update(oldService, service) && proxier.isInitialized() {
 		proxier.syncRunner.Run()
 	}
 }

 // OnServiceDelete is called whenever deletion of an existing service object is observed.
-func (proxier *Proxier) OnServiceDelete(service *api.Service) {
+func (proxier *Proxier) OnServiceDelete(service *v1.Service) {
 	proxier.OnServiceUpdate(service, nil)
 }

@@ -641,19 +630,19 @@ func (proxier *Proxier) OnServiceSynced() {
 }

 // OnEndpointsAdd is called whenever creation of new endpoints object is observed.
-func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) {
+func (proxier *Proxier) OnEndpointsAdd(endpoints *v1.Endpoints) {
 	proxier.OnEndpointsUpdate(nil, endpoints)
 }

 // OnEndpointsUpdate is called whenever modification of an existing endpoints object is observed.
-func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) {
+func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoints) {
 	if proxier.endpointsChanges.Update(oldEndpoints, endpoints) && proxier.isInitialized() {
 		proxier.syncRunner.Run()
 	}
 }

 // OnEndpointsDelete is called whenever deletion of an existing endpoints object is observed.
-func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) {
+func (proxier *Proxier) OnEndpointsDelete(endpoints *v1.Endpoints) {
 	proxier.OnEndpointsUpdate(endpoints, nil)
 }

@@ -697,7 +686,7 @@ func (proxier *Proxier) syncProxyRules() {
 	staleServices := serviceUpdateResult.UDPStaleClusterIP
 	// merge stale services gathered from updateEndpointsMap
 	for _, svcPortName := range endpointUpdateResult.StaleServiceNames {
-		if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == api.ProtocolUDP {
+		if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == v1.ProtocolUDP {
 			glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString())
 			staleServices.Insert(svcInfo.ClusterIPString())
 		}
@@ -711,6 +700,8 @@ func (proxier *Proxier) syncProxyRules() {
 	// This is to avoid memory reallocations and thus improve performance.
 	proxier.natChains.Reset()
 	proxier.natRules.Reset()
+	proxier.filterChains.Reset()
+	proxier.filterRules.Reset()

 	// Write table headers.
 	writeLine(proxier.filterChains, "*filter")
@@ -739,6 +730,8 @@ func (proxier *Proxier) syncProxyRules() {
 	activeIPVSServices := map[string]bool{}
 	// currentIPVSServices represent IPVS services listed from the system
 	currentIPVSServices := make(map[string]*utilipvs.VirtualServer)
+	// activeBindAddrs represents ip address successfully bind to DefaultDummyDevice in this round of sync
+	activeBindAddrs := map[string]bool{}

 	// Build IPVS rules for each service.
 	for svcName, svc := range proxier.serviceMap {
@@ -789,14 +782,11 @@ func (proxier *Proxier) syncProxyRules() {
 		}
 		// add service Cluster IP:Port to kubeServiceAccess ip set for the purpose of solving hairpin.
 		// proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String())
-		// Install masquerade rules if 'masqueradeAll' or 'clusterCIDR' is specified.
-		if proxier.masqueradeAll || len(proxier.clusterCIDR) > 0 {
-			if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid {
-				glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name))
-				continue
-			}
-			proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String())
-		}
+		if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid {
+			glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name))
+			continue
+		}
+		proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String())
 		// ipvs call
 		serv := &utilipvs.VirtualServer{
 			Address: svcInfo.ClusterIP,
@@ -805,13 +795,14 @@ func (proxier *Proxier) syncProxyRules() {
 			Scheduler: proxier.ipvsScheduler,
 		}
 		// Set session affinity flag and timeout for IPVS service
-		if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
+		if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
 			serv.Flags |= utilipvs.FlagPersistent
 			serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
 		}
 		// We need to bind ClusterIP to dummy interface, so set `bindAddr` parameter to `true` in syncService()
 		if err := proxier.syncService(svcNameString, serv, true); err == nil {
 			activeIPVSServices[serv.String()] = true
+			activeBindAddrs[serv.Address.String()] = true
 			// ExternalTrafficPolicy only works for NodePort and external LB traffic, does not affect ClusterIP
 			// So we still need clusterIP rules in onlyNodeLocalEndpoints mode.
 			if err := proxier.syncEndpoint(svcName, false, serv); err != nil {
@@ -825,7 +816,9 @@ func (proxier *Proxier) syncProxyRules() {
 		for _, externalIP := range svcInfo.ExternalIPs {
 			if local, err := utilproxy.IsLocalIP(externalIP); err != nil {
 				glog.Errorf("can't determine if IP is local, assuming not: %v", err)
-			} else if local {
+				// We do not start listening on SCTP ports, according to our agreement in the
+				// SCTP support KEP
+			} else if local && (svcInfo.GetProtocol() != v1.ProtocolSCTP) {
 				lp := utilproxy.LocalPort{
 					Description: "externalIP for " + svcNameString,
 					IP:          externalIP,
@@ -841,12 +834,12 @@ func (proxier *Proxier) syncProxyRules() {
 					msg := fmt.Sprintf("can't open %s, skipping this externalIP: %v", lp.String(), err)

 					proxier.recorder.Eventf(
-						&clientv1.ObjectReference{
+						&v1.ObjectReference{
 							Kind:      "Node",
 							Name:      proxier.hostname,
 							UID:       types.UID(proxier.hostname),
 							Namespace: "",
-						}, api.EventTypeWarning, err.Error(), msg)
+						}, v1.EventTypeWarning, err.Error(), msg)
 					glog.Error(msg)
 					continue
 				}
@@ -875,12 +868,13 @@ func (proxier *Proxier) syncProxyRules() {
 				Protocol:  string(svcInfo.Protocol),
 				Scheduler: proxier.ipvsScheduler,
 			}
-			if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
+			if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
 				serv.Flags |= utilipvs.FlagPersistent
 				serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
 			}
 			if err := proxier.syncService(svcNameString, serv, true); err == nil {
 				activeIPVSServices[serv.String()] = true
+				activeBindAddrs[serv.Address.String()] = true
 				if err := proxier.syncEndpoint(svcName, false, serv); err != nil {
 					glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err)
 				}
@@ -975,14 +969,15 @@ func (proxier *Proxier) syncProxyRules() {
 				Protocol:  string(svcInfo.Protocol),
 				Scheduler: proxier.ipvsScheduler,
 			}
-			if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
+			if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
 				serv.Flags |= utilipvs.FlagPersistent
 				serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
 			}
 			if err := proxier.syncService(svcNameString, serv, true); err == nil {
 				// check if service need skip endpoints that not in same host as kube-proxy
-				onlyLocal := svcInfo.SessionAffinityType == api.ServiceAffinityClientIP && svcInfo.OnlyNodeLocalEndpoints
+				onlyLocal := svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP && svcInfo.OnlyNodeLocalEndpoints
 				activeIPVSServices[serv.String()] = true
+				activeBindAddrs[serv.Address.String()] = true
 				if err := proxier.syncEndpoint(svcName, onlyLocal, serv); err != nil {
 					glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err)
 				}
@@ -1022,7 +1017,9 @@ func (proxier *Proxier) syncProxyRules() {
 			if proxier.portsMap[lp] != nil {
 				glog.V(4).Infof("Port %s was open before and is still needed", lp.String())
 				replacementPortsMap[lp] = proxier.portsMap[lp]
-			} else {
+				// We do not start listening on SCTP ports, according to our agreement in the
+				// SCTP support KEP
+			} else if svcInfo.GetProtocol() != v1.ProtocolSCTP {
 				socket, err := proxier.portMapper.OpenLocalPort(&lp)
 				if err != nil {
 					glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err)
@@ -1030,7 +1027,7 @@ func (proxier *Proxier) syncProxyRules() {
 				}
 				if lp.Protocol == "udp" {
 					isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP)
-					conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, clientv1.ProtocolUDP)
+					conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP)
 				}
 				replacementPortsMap[lp] = socket
 			} // We're holding the port, so it's OK to install ipvs rules.
@@ -1050,6 +1047,8 @@ func (proxier *Proxier) syncProxyRules() {
 				nodePortSet = proxier.ipsetList[kubeNodePortSetTCP]
 			case "udp":
 				nodePortSet = proxier.ipsetList[kubeNodePortSetUDP]
+			case "sctp":
+				nodePortSet = proxier.ipsetList[kubeNodePortSetSCTP]
 			default:
 				// It should never hit
 				glog.Errorf("Unsupported protocol type: %s", protocol)
@@ -1070,6 +1069,8 @@ func (proxier *Proxier) syncProxyRules() {
 					nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetTCP]
 				case "udp":
 					nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetUDP]
+				case "sctp":
+					nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetSCTP]
 				default:
 					// It should never hit
 					glog.Errorf("Unsupported protocol type: %s", protocol)
@@ -1104,14 +1105,14 @@ func (proxier *Proxier) syncProxyRules() {
 				Protocol:  string(svcInfo.Protocol),
 				Scheduler: proxier.ipvsScheduler,
 			}
-			if svcInfo.SessionAffinityType == api.ServiceAffinityClientIP {
+			if svcInfo.SessionAffinityType == v1.ServiceAffinityClientIP {
 				serv.Flags |= utilipvs.FlagPersistent
 				serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds)
 			}
 			// There is no need to bind Node IP to dummy interface, so set parameter `bindAddr` to `false`.
 			if err := proxier.syncService(svcNameString, serv, false); err == nil {
 				activeIPVSServices[serv.String()] = true
-				if err := proxier.syncEndpoint(svcName, false, serv); err != nil {
+				if err := proxier.syncEndpoint(svcName, svcInfo.OnlyNodeLocalEndpoints, serv); err != nil {
 					glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err)
 				}
 			} else {
@@ -1135,6 +1136,8 @@ func (proxier *Proxier) syncProxyRules() {
 	proxier.iptablesData.Reset()
 	proxier.iptablesData.Write(proxier.natChains.Bytes())
 	proxier.iptablesData.Write(proxier.natRules.Bytes())
+	proxier.iptablesData.Write(proxier.filterChains.Bytes())
+	proxier.iptablesData.Write(proxier.filterRules.Bytes())

 	glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes())
 	err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
@@ -1164,6 +1167,14 @@ func (proxier *Proxier) syncProxyRules() {
 	}
 	proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices)

+	// Clean up legacy bind address
+	// currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system
+	currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice)
+	if err != nil {
+		glog.Errorf("Failed to get bind address, err: %v", err)
+	}
+	proxier.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs)
+
 	// Update healthz timestamp
 	if proxier.healthzServer != nil {
 		proxier.healthzServer.UpdateTimestamp()
@@ -1182,7 +1193,7 @@ func (proxier *Proxier) syncProxyRules() {
 	// Finish housekeeping.
 	// TODO: these could be made more consistent.
 	for _, svcIP := range staleServices.UnsortedList() {
-		if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, clientv1.ProtocolUDP); err != nil {
+		if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil {
 			glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err)
 		}
 	}
@@ -1207,8 +1218,11 @@ func (proxier *Proxier) writeIptablesRules() {

 	for _, set := range ipsetWithIptablesChain {
 		if _, find := proxier.ipsetList[set.name]; find && !proxier.ipsetList[set.name].isEmpty() {
-			args = append(args[:0],
-				"-A", set.from,
+			args = append(args[:0], "-A", set.from)
+			if set.protocolMatch != "" {
+				args = append(args, "-p", set.protocolMatch)
+			}
+			args = append(args,
 				"-m", "comment", "--comment", proxier.ipsetList[set.name].getComment(),
 				"-m", "set", "--match-set", set.name,
 				set.matchType,
@@ -1267,39 +1281,12 @@ func (proxier *Proxier) writeIptablesRules() {
 		writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", "ACCEPT")...)
 	}

-	if !proxier.ipsetList[kubeNodePortSetUDP].isEmpty() {
-		// accept for nodeports w/ externaltrafficpolicy=local
-		args = append(args[:0],
-			"-A", string(kubeServicesChain),
-			"-m", "udp", "-p", "udp",
-			"-m", "comment", "--comment", proxier.ipsetList[kubeNodePortSetUDP].getComment(),
-			"-m", "set", "--match-set", kubeNodePortSetUDP,
-			"dst",
-		)
-		writeLine(proxier.natRules, append(args, "-j", string(KubeNodePortChain))...)
-		if !proxier.ipsetList[kubeNodePortLocalSetUDP].isEmpty() {
-			args = append(args[:0],
-				"-A", string(KubeNodePortChain),
-				"-m", "comment", "--comment", proxier.ipsetList[kubeNodePortLocalSetUDP].getComment(),
-				"-m", "set", "--match-set", kubeNodePortLocalSetUDP,
-				"dst",
-			)
-			writeLine(proxier.natRules, append(args, "-j", "ACCEPT")...)
-		}
-		// mark masq for others
-		args = append(args[:0],
-			"-A", string(KubeNodePortChain),
-			"-m", "comment", "--comment",
-			fmt.Sprintf(`"mark MASQ for externaltrafficpolicy=cluster"`),
-		)
-		writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...)
-	}
-
-	// mark masq for KUBE-NODE-PORT
-	writeLine(proxier.natRules, []string{
-		"-A", string(KubeNodePortChain),
-		"-j", string(KubeMarkMasqChain),
-	}...)
+	// -A KUBE-SERVICES -m addrtype --dst-type LOCAL -j KUBE-NODE-PORT
+	args = append(args[:0],
+		"-A", string(kubeServicesChain),
+		"-m", "addrtype", "--dst-type", "LOCAL",
+	)
+	writeLine(proxier.natRules, append(args, "-j", string(KubeNodePortChain))...)

 	// mark drop for KUBE-LOAD-BALANCER
 	writeLine(proxier.natRules, []string{
@@ -1379,8 +1366,8 @@ func (proxier *Proxier) acceptIPVSTraffic() {

 // createAndLinkeKubeChain create all kube chains that ipvs proxier need and write basic link.
 func (proxier *Proxier) createAndLinkeKubeChain() {
-	existingFilterChains := proxier.getExistingChains(utiliptables.TableFilter)
-	existingNATChains := proxier.getExistingChains(utiliptables.TableNAT)
+	existingFilterChains := proxier.getExistingChains(proxier.filterChainsData, utiliptables.TableFilter)
+	existingNATChains := proxier.getExistingChains(proxier.iptablesData, utiliptables.TableNAT)

 	// Make sure we keep stats for the top-level chains
 	for _, ch := range iptablesChains {
@@ -1390,13 +1377,13 @@ func (proxier *Proxier) createAndLinkeKubeChain() {
 		}
 		if ch.table == utiliptables.TableNAT {
 			if chain, ok := existingNATChains[ch.chain]; ok {
-				writeLine(proxier.natChains, chain)
+				writeBytesLine(proxier.natChains, chain)
 			} else {
 				writeLine(proxier.natChains, utiliptables.MakeChainLine(kubePostroutingChain))
 			}
 		} else {
 			if chain, ok := existingFilterChains[KubeForwardChain]; ok {
-				writeLine(proxier.filterChains, chain)
+				writeBytesLine(proxier.filterChains, chain)
 			} else {
 				writeLine(proxier.filterChains, utiliptables.MakeChainLine(KubeForwardChain))
 			}
@@ -1431,13 +1418,14 @@ func (proxier *Proxier) createAndLinkeKubeChain() {

 // getExistingChains get iptables-save output so we can check for existing chains and rules.
 // This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
-func (proxier *Proxier) getExistingChains(table utiliptables.Table) map[utiliptables.Chain]string {
-	proxier.iptablesData.Reset()
-	err := proxier.iptables.SaveInto(table, proxier.iptablesData)
+// Result may SHARE memory with contents of buffer.
+func (proxier *Proxier) getExistingChains(buffer *bytes.Buffer, table utiliptables.Table) map[utiliptables.Chain][]byte {
+	buffer.Reset()
+	err := proxier.iptables.SaveInto(table, buffer)
 	if err != nil { // if we failed to get any rules
 		glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
 	} else { // otherwise parse the output
-		return utiliptables.GetChainLines(table, proxier.iptablesData.Bytes())
+		return utiliptables.GetChainLines(table, buffer.Bytes())
 	}
 	return nil
 }
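The new doc line "Result may SHARE memory with contents of buffer" explains why the Proxier grew a dedicated filterChainsData buffer alongside iptablesData: the returned []byte chain lines alias the buffer they were parsed from, so each table snapshot needs its own backing buffer. A standard-library-only sketch of the hazard (assuming, as is typical of bytes.Buffer, that the backing array is reused after Reset):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        buf := bytes.NewBuffer(nil)
        buf.WriteString(":KUBE-SERVICES - [0:0]")
        chain := buf.Bytes()[:14] // alias into buf, like a parsed chain line

        fmt.Printf("%s\n", chain) // :KUBE-SERVICES

        buf.Reset()                                // keeps the backing array
        buf.WriteString(":KUBE-NODE-PORT - [0:0]") // overwrites the shared bytes

        fmt.Printf("%s\n", chain) // the stale alias now shows the new content
    }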
@@ -1447,9 +1435,9 @@ func (proxier *Proxier) getExistingChains(table utilipta
 // This assumes the proxier mutex is held
 func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceEndpoint) {
 	for _, epSvcPair := range connectionMap {
-		if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.GetProtocol() == api.ProtocolUDP {
+		if svcInfo, ok := proxier.serviceMap[epSvcPair.ServicePortName]; ok && svcInfo.GetProtocol() == v1.ProtocolUDP {
 			endpointIP := utilproxy.IPPart(epSvcPair.Endpoint)
-			err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, clientv1.ProtocolUDP)
+			err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP)
 			if err != nil {
 				glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err)
 			}
@@ -1481,6 +1469,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer,
 	// bind service address to dummy interface even if service not changed,
 	// in case that service IP was removed by other processes
 	if bindAddr {
+		glog.V(4).Infof("Bind addr %s", vs.Address.String())
 		_, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), DefaultDummyDevice)
 		if err != nil {
 			glog.Errorf("Failed to bind service address to dummy device %q: %v", svcName, err)
@@ -1518,66 +1507,93 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode
 		newEndpoints.Insert(epInfo.String())
 	}

-	if !curEndpoints.Equal(newEndpoints) {
-		// Create new endpoints
-		for _, ep := range newEndpoints.Difference(curEndpoints).UnsortedList() {
-			ip, port, err := net.SplitHostPort(ep)
-			if err != nil {
-				glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
-				continue
-			}
-			portNum, err := strconv.Atoi(port)
-			if err != nil {
-				glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
-				continue
-			}
+	// Create new endpoints
+	for _, ep := range newEndpoints.List() {
+		ip, port, err := net.SplitHostPort(ep)
+		if err != nil {
+			glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
+			continue
+		}
+		portNum, err := strconv.Atoi(port)
+		if err != nil {
+			glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
+			continue
+		}

-			newDest := &utilipvs.RealServer{
-				Address: net.ParseIP(ip),
-				Port:    uint16(portNum),
-				Weight:  1,
-			}
+		newDest := &utilipvs.RealServer{
+			Address: net.ParseIP(ip),
+			Port:    uint16(portNum),
+			Weight:  1,
+		}
+
+		if curEndpoints.Has(ep) {
+			// check if newEndpoint is in gracefulDelete list, if true, delete this ep immediately
+			uniqueRS := GetUniqueRSName(vs, newDest)
+			if !proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
+				continue
+			}
-			err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest)
+			glog.V(5).Infof("new ep %q is in graceful delete list", uniqueRS)
+			err := proxier.gracefuldeleteManager.MoveRSOutofGracefulDeleteList(uniqueRS)
 			if err != nil {
-				glog.Errorf("Failed to add destination: %v, error: %v", newDest, err)
+				glog.Errorf("Failed to delete endpoint: %v in gracefulDeleteQueue, error: %v", ep, err)
 				continue
 			}
 		}
-		// Delete old endpoints
-		for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() {
-			ip, port, err := net.SplitHostPort(ep)
-			if err != nil {
-				glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
-				continue
-			}
-			portNum, err := strconv.Atoi(port)
-			if err != nil {
-				glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
-				continue
-			}
+		err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest)
+		if err != nil {
+			glog.Errorf("Failed to add destination: %v, error: %v", newDest, err)
+			continue
+		}
+	}
+	// Delete old endpoints
+	for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() {
+		// if curEndpoint is in gracefulDelete, skip
+		uniqueRS := vs.String() + "/" + ep
+		if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
+			continue
+		}
+		ip, port, err := net.SplitHostPort(ep)
+		if err != nil {
+			glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err)
+			continue
+		}
+		portNum, err := strconv.Atoi(port)
+		if err != nil {
+			glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err)
+			continue
+		}

-			delDest := &utilipvs.RealServer{
-				Address: net.ParseIP(ip),
-				Port:    uint16(portNum),
-			}
-			err = proxier.ipvs.DeleteRealServer(appliedVirtualServer, delDest)
-			if err != nil {
-				glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err)
-				continue
-			}
+		delDest := &utilipvs.RealServer{
+			Address: net.ParseIP(ip),
+			Port:    uint16(portNum),
+		}
+
+		glog.V(5).Infof("Using graceful delete to delete: %v", delDest)
+		err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest)
+		if err != nil {
+			glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err)
+			continue
 		}
 	}
 	return nil
 }

 func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer) {
-	unbindIPAddr := sets.NewString()
 	for cs := range currentServices {
 		svc := currentServices[cs]
 		if _, ok := activeServices[cs]; !ok {
 			// This service was not processed in the latest sync loop so before deleting it,
 			// make sure it does not fall within an excluded CIDR range.
 			okayToDelete := true
+			rsList, _ := proxier.ipvs.GetRealServers(svc)
+			for _, rs := range rsList {
+				uniqueRS := GetUniqueRSName(svc, rs)
+				// if there are in terminating real server in this service, then handle it later
+				if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) {
+					okayToDelete = false
+					break
+				}
+			}
 			for _, excludedCIDR := range proxier.excludeCIDRs {
 				// Any validation of this CIDR already should have occurred.
 				_, n, _ := net.ParseCIDR(excludedCIDR)
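The create/delete loops above now consult a termination manager instead of touching IPVS directly: an endpoint that reappears while draining is pulled back off the drain list, and a disappearing one is drained rather than dropped. A minimal sketch of that bookkeeping (the type and method names here are illustrative stand-ins, not the vendored GracefulTerminationManager):

    package main

    import "fmt"

    // gracefulManager tracks real servers that are draining instead of
    // being deleted outright, keyed by a unique "vip/ip:port" string.
    type gracefulManager struct {
        terminating map[string]bool
    }

    func (m *gracefulManager) InTerminationList(rs string) bool { return m.terminating[rs] }

    // GracefulDelete defers removal until the server's connections drain
    // (the actual draining is elided in this sketch).
    func (m *gracefulManager) GracefulDelete(rs string) {
        m.terminating[rs] = true
        fmt.Println("draining", rs)
    }

    // Revive plays the MoveRSOutofGracefulDeleteList role: an endpoint
    // that reappears while draining is simply taken off the list.
    func (m *gracefulManager) Revive(rs string) {
        delete(m.terminating, rs)
        fmt.Println("revived", rs)
    }

    func main() {
        m := &gracefulManager{terminating: map[string]bool{}}
        m.GracefulDelete("10.0.0.1:80/10.244.1.5:8080")
        if m.InTerminationList("10.0.0.1:80/10.244.1.5:8080") {
            m.Revive("10.0.0.1:80/10.244.1.5:8080")
        }
    }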
@@ -1590,16 +1606,21 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre
 				if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil {
 					glog.Errorf("Failed to delete service, error: %v", err)
 				}
-				unbindIPAddr.Insert(svc.Address.String())
 			}
 		}
 	}
+}

-	for _, addr := range unbindIPAddr.UnsortedList() {
-		err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice)
-		// Ignore no such address error when try to unbind address
-		if err != nil {
-			glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
+func (proxier *Proxier) cleanLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) {
+	for _, addr := range currentBindAddrs {
+		if _, ok := activeBindAddrs[addr]; !ok {
+			// This address was not processed in the latest sync loop
+			glog.V(4).Infof("Unbind addr %s", addr)
+			err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice)
+			// Ignore no such address error when try to unbind address
+			if err != nil {
+				glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err)
+			}
 		}
 	}
 }
@@ -1617,6 +1638,11 @@ func writeLine(buf *bytes.Buffer, words ...string) {
 	}
 }

+func writeBytesLine(buf *bytes.Buffer, bytes []byte) {
+	buf.Write(bytes)
+	buf.WriteByte('\n')
+}
+
 // listenPortOpener opens ports by calling bind() and listen().
 type listenPortOpener struct{}
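writeBytesLine mirrors writeLine for the []byte chain lines that getExistingChains now returns, skipping a string conversion per line. A small standard-library sketch of the pair (the writeLine here is a simplified stand-in for the vendored helper):

    package main

    import (
        "bytes"
        "fmt"
        "strings"
    )

    // writeLine joins words with spaces and appends a newline.
    func writeLine(buf *bytes.Buffer, words ...string) {
        buf.WriteString(strings.Join(words, " ") + "\n")
    }

    // writeBytesLine appends a pre-rendered []byte line without
    // converting it to a string first.
    func writeBytesLine(buf *bytes.Buffer, line []byte) {
        buf.Write(line)
        buf.WriteByte('\n')
    }

    func main() {
        var buf bytes.Buffer
        writeLine(&buf, ":KUBE-MARK-MASQ", "-", "[0:0]")
        writeBytesLine(&buf, []byte(":KUBE-NODE-PORT - [0:0]"))
        fmt.Print(buf.String())
    }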