Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
  - klog has explicit InitFlags() so we add them as necessary
- We update the other repositories that we vendor that made a similar change from glog to klog:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by explicit InitFlags in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
This commit is contained in:
@@ -30,7 +30,7 @@ import (
|
||||
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -107,14 +107,14 @@ type hnsNetworkInfo struct {
|
||||
id string
|
||||
}
|
||||
|
||||
func Log(v interface{}, message string, level glog.Level) {
|
||||
glog.V(level).Infof("%s, %s", message, spew.Sdump(v))
|
||||
func Log(v interface{}, message string, level klog.Level) {
|
||||
klog.V(level).Infof("%s, %s", message, spew.Sdump(v))
|
||||
}
|
||||
|
||||
func LogJson(v interface{}, message string, level glog.Level) {
|
||||
func LogJson(v interface{}, message string, level klog.Level) {
|
||||
jsonString, err := json.Marshal(v)
|
||||
if err == nil {
|
||||
glog.V(level).Infof("%s, %s", message, string(jsonString))
|
||||
klog.V(level).Infof("%s, %s", message, string(jsonString))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -159,7 +159,7 @@ func (ep *endpointsInfo) Cleanup() {
|
||||
// Never delete a Local Endpoint. Local Endpoints are already created by other entities.
|
||||
// Remove only remote endpoints created by this service
|
||||
if ep.refCount <= 0 && !ep.isLocal {
|
||||
glog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep)
|
||||
klog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep)
|
||||
deleteHnsEndpoint(ep.hnsID)
|
||||
ep.hnsID = ""
|
||||
}
|
||||
@@ -206,7 +206,7 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *v1.ServicePort, ser
|
||||
if apiservice.NeedsHealthCheck(service) {
|
||||
p := service.Spec.HealthCheckNodePort
|
||||
if p == 0 {
|
||||
glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
|
||||
klog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String())
|
||||
} else {
|
||||
info.healthCheckNodePort = int(p)
|
||||
}
|
||||
@@ -303,9 +303,9 @@ func (sm *proxyServiceMap) merge(other proxyServiceMap, curEndpoints proxyEndpoi
|
||||
existingPorts.Insert(svcPortName.Port)
|
||||
svcInfo, exists := (*sm)[svcPortName]
|
||||
if !exists {
|
||||
glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
|
||||
klog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
|
||||
} else {
|
||||
glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
|
||||
klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
|
||||
svcInfo.cleanupAllPolicies(curEndpoints[svcPortName])
|
||||
delete(*sm, svcPortName)
|
||||
}
|
||||
@@ -321,14 +321,14 @@ func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleSe
|
||||
}
|
||||
info, exists := (*sm)[svcPortName]
|
||||
if exists {
|
||||
glog.V(1).Infof("Removing service port %q", svcPortName)
|
||||
klog.V(1).Infof("Removing service port %q", svcPortName)
|
||||
if info.protocol == v1.ProtocolUDP {
|
||||
staleServices.Insert(info.clusterIP.String())
|
||||
}
|
||||
info.cleanupAllPolicies(curEndpoints[svcPortName])
|
||||
delete(*sm, svcPortName)
|
||||
} else {
|
||||
glog.Errorf("Service port %q removed, but doesn't exists", svcPortName)
|
||||
klog.Errorf("Service port %q removed, but doesn't exists", svcPortName)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -340,13 +340,13 @@ func (em proxyEndpointsMap) merge(other proxyEndpointsMap, curServices proxyServ
|
||||
if exists {
|
||||
//
|
||||
info, exists := curServices[svcPortName]
|
||||
glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
|
||||
klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol)
|
||||
if exists {
|
||||
glog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName)
|
||||
klog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName)
|
||||
info.cleanupAllPolicies(epInfos)
|
||||
} else {
|
||||
// If no service exists, just cleanup the remote endpoints
|
||||
glog.V(2).Infof("Endpoints are orphaned. Cleaning up")
|
||||
klog.V(2).Infof("Endpoints are orphaned. Cleaning up")
|
||||
// Cleanup Endpoints references
|
||||
for _, ep := range epInfos {
|
||||
ep.Cleanup()
|
||||
@@ -365,11 +365,11 @@ func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap, curServices proxySe
|
||||
for svcPortName := range other {
|
||||
info, exists := curServices[svcPortName]
|
||||
if exists {
|
||||
glog.V(2).Infof("Service [%v] is stale", info)
|
||||
klog.V(2).Infof("Service [%v] is stale", info)
|
||||
info.cleanupAllPolicies(em[svcPortName])
|
||||
} else {
|
||||
// If no service exists, just cleanup the remote endpoints
|
||||
glog.V(2).Infof("Endpoints are orphaned. Cleaning up")
|
||||
klog.V(2).Infof("Endpoints are orphaned. Cleaning up")
|
||||
// Cleanup Endpoints references
|
||||
epInfos, exists := em[svcPortName]
|
||||
if exists {
|
||||
@@ -470,12 +470,12 @@ func NewProxier(
|
||||
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
|
||||
|
||||
if nodeIP == nil {
|
||||
glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
|
||||
klog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP")
|
||||
nodeIP = net.ParseIP("127.0.0.1")
|
||||
}
|
||||
|
||||
if len(clusterCIDR) == 0 {
|
||||
glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
|
||||
klog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
|
||||
}
|
||||
|
||||
healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps
|
||||
@@ -487,11 +487,11 @@ func NewProxier(
|
||||
}
|
||||
hnsNetwork, err := getHnsNetworkInfo(hnsNetworkName)
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to find Hns Network specified by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName)
|
||||
klog.Fatalf("Unable to find Hns Network specified by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork)
|
||||
klog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork)
|
||||
|
||||
proxier := &Proxier{
|
||||
portsMap: make(map[localPort]closeable),
|
||||
@@ -511,7 +511,7 @@ func NewProxier(
|
||||
}
|
||||
|
||||
burstSyncs := 2
|
||||
glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
|
||||
klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs)
|
||||
proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs)
|
||||
return proxier, nil
|
||||
|
||||
@@ -568,7 +568,7 @@ func deleteAllHnsLoadBalancerPolicy() {
|
||||
LogJson(plist, "Remove Policy", 3)
|
||||
_, err = plist.Delete()
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -629,36 +629,36 @@ func deleteHnsLoadBalancerPolicy(hnsID string) {
|
||||
// Cleanup HNS policies
|
||||
hnsloadBalancer, err := hcsshim.GetPolicyListByID(hnsID)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
return
|
||||
}
|
||||
LogJson(hnsloadBalancer, "Removing Policy", 2)
|
||||
|
||||
_, err = hnsloadBalancer.Delete()
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func deleteHnsEndpoint(hnsID string) {
|
||||
hnsendpoint, err := hcsshim.GetHNSEndpointByID(hnsID)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = hnsendpoint.Delete()
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID)
|
||||
klog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID)
|
||||
}
|
||||
|
||||
func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) {
|
||||
hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -671,7 +671,7 @@ func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) {
|
||||
func getHnsEndpointByIpAddress(ip net.IP, networkName string) (*hcsshim.HNSEndpoint, error) {
|
||||
hnsnetwork, err := hcsshim.GetHNSNetworkByName(networkName)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -746,12 +746,12 @@ func (proxier *Proxier) OnServiceSynced() {
|
||||
func shouldSkipService(svcName types.NamespacedName, service *v1.Service) bool {
|
||||
// if ClusterIP is "None" or empty, skip proxying
|
||||
if !helper.IsServiceIPSet(service) {
|
||||
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
|
||||
klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
|
||||
return true
|
||||
}
|
||||
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
|
||||
if service.Spec.Type == v1.ServiceTypeExternalName {
|
||||
glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
|
||||
klog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@@ -880,7 +880,7 @@ func endpointsToEndpointsMap(endpoints *v1.Endpoints, hostname string) proxyEndp
|
||||
for i := range ss.Ports {
|
||||
port := &ss.Ports[i]
|
||||
if port.Port == 0 {
|
||||
glog.Warningf("ignoring invalid endpoint port %s", port.Name)
|
||||
klog.Warningf("ignoring invalid endpoint port %s", port.Name)
|
||||
continue
|
||||
}
|
||||
svcPortName := proxy.ServicePortName{
|
||||
@@ -890,19 +890,19 @@ func endpointsToEndpointsMap(endpoints *v1.Endpoints, hostname string) proxyEndp
|
||||
for i := range ss.Addresses {
|
||||
addr := &ss.Addresses[i]
|
||||
if addr.IP == "" {
|
||||
glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
|
||||
klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name)
|
||||
continue
|
||||
}
|
||||
isLocal := addr.NodeName != nil && *addr.NodeName == hostname
|
||||
epInfo := newEndpointInfo(addr.IP, uint16(port.Port), isLocal)
|
||||
endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo)
|
||||
}
|
||||
if glog.V(3) {
|
||||
if klog.V(3) {
|
||||
newEPList := []*endpointsInfo{}
|
||||
for _, ep := range endpointsMap[svcPortName] {
|
||||
newEPList = append(newEPList, ep)
|
||||
}
|
||||
glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
|
||||
klog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -939,11 +939,11 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
SyncProxyRulesLatency.Observe(sinceInMicroseconds(start))
|
||||
glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
|
||||
klog.V(4).Infof("syncProxyRules took %v", time.Since(start))
|
||||
}()
|
||||
// don't sync rules till we've received services and endpoints
|
||||
if !proxier.endpointsSynced || !proxier.servicesSynced {
|
||||
glog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master")
|
||||
klog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -957,22 +957,22 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
// merge stale services gathered from updateEndpointsMap
|
||||
for svcPortName := range endpointUpdateResult.staleServiceNames {
|
||||
if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == v1.ProtocolUDP {
|
||||
glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String())
|
||||
klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String())
|
||||
staleServices.Insert(svcInfo.clusterIP.String())
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Syncing Policies")
|
||||
klog.V(3).Infof("Syncing Policies")
|
||||
|
||||
// Program HNS by adding corresponding policies for each service.
|
||||
for svcName, svcInfo := range proxier.serviceMap {
|
||||
if svcInfo.policyApplied {
|
||||
glog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo))
|
||||
klog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo))
|
||||
continue
|
||||
}
|
||||
|
||||
var hnsEndpoints []hcsshim.HNSEndpoint
|
||||
glog.V(4).Infof("====Applying Policy for %s====", svcName)
|
||||
klog.V(4).Infof("====Applying Policy for %s====", svcName)
|
||||
// Create Remote endpoints for every endpoint, corresponding to the service
|
||||
|
||||
for _, ep := range proxier.endpointsMap[svcName] {
|
||||
@@ -1000,13 +1000,13 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
|
||||
if newHnsEndpoint == nil {
|
||||
if ep.isLocal {
|
||||
glog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName)
|
||||
klog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName)
|
||||
continue
|
||||
}
|
||||
// hns Endpoint resource was not found, create one
|
||||
hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
klog.Errorf("%v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1017,7 +1017,7 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
|
||||
newHnsEndpoint, err = hnsnetwork.CreateRemoteEndpoint(hnsEndpoint)
|
||||
if err != nil {
|
||||
glog.Errorf("Remote endpoint creation failed: %v", err)
|
||||
klog.Errorf("Remote endpoint creation failed: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -1030,19 +1030,19 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
Log(ep, "Endpoint resource found", 3)
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName)
|
||||
klog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName)
|
||||
|
||||
if len(svcInfo.hnsID) > 0 {
|
||||
// This should not happen
|
||||
glog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID)
|
||||
klog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID)
|
||||
}
|
||||
|
||||
if len(hnsEndpoints) == 0 {
|
||||
glog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName)
|
||||
klog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName)
|
||||
continue
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo))
|
||||
klog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo))
|
||||
var hnsLoadBalancer *hcsshim.PolicyList
|
||||
|
||||
hnsLoadBalancer, err := getHnsLoadBalancer(
|
||||
@@ -1054,12 +1054,12 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
uint16(svcInfo.port),
|
||||
)
|
||||
if err != nil {
|
||||
glog.Errorf("Policy creation failed: %v", err)
|
||||
klog.Errorf("Policy creation failed: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
svcInfo.hnsID = hnsLoadBalancer.ID
|
||||
glog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
|
||||
klog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
|
||||
|
||||
// If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints
|
||||
if svcInfo.nodePort > 0 {
|
||||
@@ -1072,12 +1072,12 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
uint16(svcInfo.nodePort),
|
||||
)
|
||||
if err != nil {
|
||||
glog.Errorf("Policy creation failed: %v", err)
|
||||
klog.Errorf("Policy creation failed: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
svcInfo.nodePorthnsID = hnsLoadBalancer.ID
|
||||
glog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
|
||||
klog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)
|
||||
}
|
||||
|
||||
// Create a Load Balancer Policy for each external IP
|
||||
@@ -1092,11 +1092,11 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
uint16(svcInfo.port),
|
||||
)
|
||||
if err != nil {
|
||||
glog.Errorf("Policy creation failed: %v", err)
|
||||
klog.Errorf("Policy creation failed: %v", err)
|
||||
continue
|
||||
}
|
||||
externalIp.hnsID = hnsLoadBalancer.ID
|
||||
glog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID)
|
||||
klog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID)
|
||||
}
|
||||
// Create a Load Balancer Policy for each loadbalancer ingress
|
||||
for _, lbIngressIp := range svcInfo.loadBalancerIngressIPs {
|
||||
@@ -1110,11 +1110,11 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
uint16(svcInfo.port),
|
||||
)
|
||||
if err != nil {
|
||||
glog.Errorf("Policy creation failed: %v", err)
|
||||
klog.Errorf("Policy creation failed: %v", err)
|
||||
continue
|
||||
}
|
||||
lbIngressIp.hnsID = hnsLoadBalancer.ID
|
||||
glog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp)
|
||||
klog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp)
|
||||
}
|
||||
svcInfo.policyApplied = true
|
||||
Log(svcInfo, "+++Policy Successfully applied for service +++", 2)
|
||||
@@ -1129,17 +1129,17 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
// not "OnlyLocal", but the services list will not, and the healthChecker
|
||||
// will just drop those endpoints.
|
||||
if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil {
|
||||
glog.Errorf("Error syncing healtcheck services: %v", err)
|
||||
klog.Errorf("Error syncing healtcheck services: %v", err)
|
||||
}
|
||||
if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil {
|
||||
glog.Errorf("Error syncing healthcheck endpoints: %v", err)
|
||||
klog.Errorf("Error syncing healthcheck endpoints: %v", err)
|
||||
}
|
||||
|
||||
// Finish housekeeping.
|
||||
// TODO: these could be made more consistent.
|
||||
for _, svcIP := range staleServices.UnsortedList() {
|
||||
// TODO : Check if this is required to cleanup stale services here
|
||||
glog.V(5).Infof("Pending delete stale service IP %s connections", svcIP)
|
||||
klog.V(5).Infof("Pending delete stale service IP %s connections", svcIP)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user