Merge pull request #8707 from justinsb/nodeport

WIP: ServiceType & NodePort work
This commit is contained in:
Dawn Chen
2015-05-22 16:46:59 -07:00
70 changed files with 3788 additions and 821 deletions

View File

@@ -86,6 +86,7 @@ type APIServer struct {
CorsAllowedOriginList util.StringList
AllowPrivileged bool
PortalNet util.IPNet // TODO: make this a list
ServiceNodePorts util.PortRange
EnableLogsSupport bool
MasterServiceNamespace string
RuntimeConfig util.ConfigurationMap
@@ -183,6 +184,7 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
fs.Var(&s.CorsAllowedOriginList, "cors-allowed-origins", "List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.")
fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged, "If true, allow privileged containers.")
fs.Var(&s.PortalNet, "portal-net", "A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
fs.Var(&s.ServiceNodePorts, "service-node-ports", "A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.")
fs.StringVar(&s.MasterServiceNamespace, "master-service-namespace", s.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods")
fs.Var(&s.RuntimeConfig, "runtime-config", "A set of key=value pairs that describe runtime configuration that may be passed to the apiserver. api/<version> key can be used to turn on/off specific api versions. api/all and api/legacy are special keys to control all and legacy api versions respectively.")
client.BindKubeletClientConfigFlags(fs, &s.KubeletConfig)

View File

@@ -629,6 +629,7 @@ _kubectl_expose()
flags+=("--target-port=")
flags+=("--template=")
two_word_flags+=("-t")
flags+=("--type=")
must_have_one_flag=()
must_have_one_flag+=("--port=")

View File

@@ -12,7 +12,7 @@ selector for a new Service on the specified port. If no labels are specified, th
re-use the labels from the resource it exposes.
```
kubectl expose RESOURCE NAME --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--public-ip=ip] [--create-external-load-balancer=bool]
kubectl expose RESOURCE NAME --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--public-ip=ip] [--type=type]
```
### Examples
@@ -32,7 +32,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
```
--container-port="": Synonym for --target-port
--create-external-load-balancer=false: If true, create an external load balancer for this service. Implementation is cloud provider dependent. Default is 'false'.
--create-external-load-balancer=false: If true, create an external load balancer for this service (trumped by --type). Implementation is cloud provider dependent. Default is 'false'.
--dry-run=false: If true, only print the object that would be sent, without creating it.
--generator="service/v1": The name of the API generator to use. Default is 'service/v1'.
-h, --help=false: help for expose
@@ -48,6 +48,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
--selector="": A label selector to use for this service. If empty (the default) infer the selector from the replication controller.
--target-port="": Name or number for the port on the container that the service should direct traffic to. Optional.
-t, --template="": Template string or path to template file to use when -o=template or -o=templatefile. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]
--type="": Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP' unless --create-external-load-balancer is specified.
```
### Options inherited from parent commands

View File

@@ -28,7 +28,7 @@ re\-use the labels from the resource it exposes.
.PP
\fB\-\-create\-external\-load\-balancer\fP=false
If true, create an external load balancer for this service. Implementation is cloud provider dependent. Default is 'false'.
If true, create an external load balancer for this service (trumped by \-\-type). Implementation is cloud provider dependent. Default is 'false'.
.PP
\fB\-\-dry\-run\fP=false
@@ -91,6 +91,10 @@ re\-use the labels from the resource it exposes.
Template string or path to template file to use when \-o=template or \-o=templatefile. The template format is golang templates [
\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]
.PP
\fB\-\-type\fP=""
Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP' unless \-\-create\-external\-load\-balancer is specified.
.SH OPTIONS INHERITED FROM PARENT COMMANDS
.PP

View File

@@ -35,7 +35,7 @@ On some platforms (for example Google Compute Engine) the kubectl command can in
to do this run:
```bash
kubectl expose rc my-nginx --port=80 --create-external-load-balancer
kubectl expose rc my-nginx --port=80 --type=LoadBalancer
```
This should print the service that has been created, and map an external IP address to the service.

View File

@@ -141,3 +141,40 @@ func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) {
}
return fmt.Sprintf("%x", md5.Sum(data)), nil
}
// TODO: make method on LoadBalancerStatus?

// LoadBalancerStatusEqual reports whether l and r describe the same
// load-balancer status, i.e. their Ingress slices are element-wise equal
// in the same order.
func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool {
	return ingressSliceEqual(l.Ingress, r.Ingress)
}
// ingressSliceEqual reports whether lhs and rhs contain the same ingress
// points, compared pairwise and in order. Slices of different lengths are
// never equal.
func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	equal := true
	for i := 0; equal && i < len(lhs); i++ {
		equal = ingressEqual(&lhs[i], &rhs[i])
	}
	return equal
}
// ingressEqual reports whether two load-balancer ingress points are
// identical: both the IP and the Hostname fields must match.
func ingressEqual(lhs, rhs *LoadBalancerIngress) bool {
	return lhs.IP == rhs.IP && lhs.Hostname == rhs.Hostname
}
// TODO: make method on LoadBalancerStatus?

// LoadBalancerStatusDeepCopy returns a new LoadBalancerStatus whose Ingress
// slice is an independent copy of lb's, so mutating one never affects the
// other. Like the original implementation, the result always carries a
// non-nil (possibly empty) Ingress slice, even when lb.Ingress is nil.
func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus {
	c := &LoadBalancerStatus{
		Ingress: make([]LoadBalancerIngress, len(lb.Ingress)),
	}
	// The built-in copy replaces the hand-rolled element loop; ingress
	// elements hold only value fields, so an element-wise copy is a deep copy.
	copy(c.Ingress, lb.Ingress)
	return c
}

View File

@@ -35,6 +35,7 @@ func makeValidService() api.Service {
Spec: api.ServiceSpec{
Selector: map[string]string{"key": "val"},
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{Name: "p", Protocol: "TCP", Port: 8675}},
},
}

View File

@@ -186,6 +186,10 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
types := []api.ServiceAffinity{api.ServiceAffinityClientIP, api.ServiceAffinityNone}
*p = types[c.Rand.Intn(len(types))]
},
func(p *api.ServiceType, c fuzz.Continue) {
types := []api.ServiceType{api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer}
*p = types[c.Rand.Intn(len(types))]
},
func(ct *api.Container, c fuzz.Continue) {
c.FuzzNoCustom(ct) // fuzz self without calling this function again
ct.TerminationMessagePath = "/" + ct.TerminationMessagePath // Must be non-empty

View File

@@ -1011,8 +1011,49 @@ const (
ServiceAffinityNone ServiceAffinity = "None"
)
// Service Type string describes ingress methods for a service
type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
// every node, in addition to 'ClusterIP' type.
ServiceTypeNodePort ServiceType = "NodePort"
// ServiceTypeLoadBalancer means a service will be exposed via an
// external load balancer (if the cloud provider supports it), in addition
// to 'NodePort' type.
ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
)
// ServiceStatus represents the current status of a service
type ServiceStatus struct{}
type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"`
}
// LoadBalancerStatus represents the status of a load-balancer
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer;
// traffic intended for the service should be sent to these ingress points.
Ingress []LoadBalancerIngress `json:"ingress,omitempty" description:"load-balancer ingress points"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
IP string `json:"ip,omitempty" description:"IP address of ingress point"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
Hostname string `json:"hostname,omitempty" description:"hostname of ingress point"`
}
// ServiceSpec describes the attributes that a user creates on a service
type ServiceSpec struct {
@@ -1031,14 +1072,13 @@ type ServiceSpec struct {
// None can be specified for headless services when proxying is not required
PortalIP string `json:"portalIP,omitempty"`
// CreateExternalLoadBalancer indicates whether a load balancer should be created for this service.
CreateExternalLoadBalancer bool `json:"createExternalLoadBalancer,omitempty"`
// PublicIPs are used by external load balancers, or can be set by
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty"`
// DeprecatedPublicIPs are deprecated and silently ignored.
// Old behaviour: PublicIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node.
// For load balancers, the publicIP will usually be the IP address of the load balancer,
// but some load balancers (notably AWS ELB) use a hostname instead of an IP address.
// For hostnames, the user will use a CNAME record (instead of using an A record with the IP)
PublicIPs []string `json:"publicIPs,omitempty"`
DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty"`
// Required: Supports "ClientIP" and "None". Used to maintain session affinity.
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
@@ -1064,6 +1104,10 @@ type ServicePort struct {
// of v1beta3 the default value is the same as the Port field (an
// identity map).
TargetPort util.IntOrString `json:"targetPort"`
// The port on each node on which this service is exposed.
// Default is to auto-allocate a port if the ServiceType of this Service requires one.
NodePort int `json:"nodePort" description:"the port on each node on which this service is exposed"`
}
// Service is a named abstraction of software service (for example, mysql) consisting of local port

View File

@@ -812,6 +812,32 @@ func convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOpt
return nil
}
// convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress copies an
// internal api.LoadBalancerIngress into its v1 wire form. Generated-style
// conversion: the defaulting hook registered for the source type (if any)
// runs first, then fields are copied one-to-one.
func convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error {
	// Apply any registered defaulting function before copying so defaults
	// survive the conversion.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.LoadBalancerIngress))(in)
	}
	out.IP = in.IP
	out.Hostname = in.Hostname
	return nil
}
// convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus converts an
// internal api.LoadBalancerStatus to its v1 form, deep-converting each
// Ingress element. A nil input slice stays nil on the output (nil vs.
// empty is preserved, as required by round-trip conversion).
func convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error {
	// Apply any registered defaulting function before converting fields.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.LoadBalancerStatus))(in)
	}
	if in.Ingress != nil {
		out.Ingress = make([]LoadBalancerIngress, len(in.Ingress))
		for i := range in.Ingress {
			if err := convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Ingress = nil
	}
	return nil
}
func convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.LocalObjectReference))(in)
@@ -2045,6 +2071,7 @@ func convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *Service
if err := s.Convert(&in.TargetPort, &out.TargetPort, 0); err != nil {
return err
}
out.NodePort = in.NodePort
return nil
}
@@ -2071,14 +2098,14 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.CreateExternalLoadBalancer = in.CreateExternalLoadBalancer
if in.PublicIPs != nil {
out.PublicIPs = make([]string, len(in.PublicIPs))
for i := range in.PublicIPs {
out.PublicIPs[i] = in.PublicIPs[i]
out.Type = ServiceType(in.Type)
if in.DeprecatedPublicIPs != nil {
out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))
for i := range in.DeprecatedPublicIPs {
out.DeprecatedPublicIPs[i] = in.DeprecatedPublicIPs[i]
}
} else {
out.PublicIPs = nil
out.DeprecatedPublicIPs = nil
}
out.SessionAffinity = ServiceAffinity(in.SessionAffinity)
return nil
@@ -2088,6 +2115,9 @@ func convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *S
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ServiceStatus))(in)
}
if err := convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
@@ -3038,6 +3068,32 @@ func convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOpt
return nil
}
// convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress copies a v1
// LoadBalancerIngress into the internal api form. Generated-style
// conversion: the defaulting hook registered for the source type (if any)
// runs first, then fields are copied one-to-one.
func convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error {
	// Apply any registered defaulting function before copying so defaults
	// survive the conversion.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*LoadBalancerIngress))(in)
	}
	out.IP = in.IP
	out.Hostname = in.Hostname
	return nil
}
// convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus converts a v1
// LoadBalancerStatus to the internal form, deep-converting each Ingress
// element. A nil input slice stays nil on the output (nil vs. empty is
// preserved, as required by round-trip conversion).
func convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error {
	// Apply any registered defaulting function before converting fields.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*LoadBalancerStatus))(in)
	}
	if in.Ingress != nil {
		out.Ingress = make([]api.LoadBalancerIngress, len(in.Ingress))
		for i := range in.Ingress {
			if err := convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil {
				return err
			}
		}
	} else {
		out.Ingress = nil
	}
	return nil
}
func convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*LocalObjectReference))(in)
@@ -4271,6 +4327,7 @@ func convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.Service
if err := s.Convert(&in.TargetPort, &out.TargetPort, 0); err != nil {
return err
}
out.NodePort = in.NodePort
return nil
}
@@ -4297,14 +4354,14 @@ func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.CreateExternalLoadBalancer = in.CreateExternalLoadBalancer
if in.PublicIPs != nil {
out.PublicIPs = make([]string, len(in.PublicIPs))
for i := range in.PublicIPs {
out.PublicIPs[i] = in.PublicIPs[i]
out.Type = api.ServiceType(in.Type)
if in.DeprecatedPublicIPs != nil {
out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs))
for i := range in.DeprecatedPublicIPs {
out.DeprecatedPublicIPs[i] = in.DeprecatedPublicIPs[i]
}
} else {
out.PublicIPs = nil
out.DeprecatedPublicIPs = nil
}
out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity)
return nil
@@ -4314,6 +4371,9 @@ func convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.S
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*ServiceStatus))(in)
}
if err := convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
@@ -4520,6 +4580,8 @@ func init() {
convert_api_ListMeta_To_v1_ListMeta,
convert_api_ListOptions_To_v1_ListOptions,
convert_api_List_To_v1_List,
convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress,
convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus,
convert_api_LocalObjectReference_To_v1_LocalObjectReference,
convert_api_NFSVolumeSource_To_v1_NFSVolumeSource,
convert_api_NamespaceList_To_v1_NamespaceList,
@@ -4629,6 +4691,8 @@ func init() {
convert_v1_ListMeta_To_api_ListMeta,
convert_v1_ListOptions_To_api_ListOptions,
convert_v1_List_To_api_List,
convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress,
convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus,
convert_v1_LocalObjectReference_To_api_LocalObjectReference,
convert_v1_NFSVolumeSource_To_api_NFSVolumeSource,
convert_v1_NamespaceList_To_api_NamespaceList,

View File

@@ -75,6 +75,9 @@ func addDefaultingFuncs() {
if obj.SessionAffinity == "" {
obj.SessionAffinity = ServiceAffinityNone
}
if obj.Type == "" {
obj.Type = ServiceTypeClusterIP
}
for i := range obj.Ports {
sp := &obj.Ports[i]
if sp.Protocol == "" {

View File

@@ -233,7 +233,10 @@ func TestSetDefaultService(t *testing.T) {
obj2 := roundTrip(t, runtime.Object(svc))
svc2 := obj2.(*versioned.Service)
if svc2.Spec.SessionAffinity != versioned.ServiceAffinityNone {
t.Errorf("Expected default sesseion affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.Spec.SessionAffinity)
t.Errorf("Expected default session affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.Spec.SessionAffinity)
}
if svc2.Spec.Type != versioned.ServiceTypeClusterIP {
t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeClusterIP, svc2.Spec.Type)
}
}

View File

@@ -993,8 +993,49 @@ const (
ServiceAffinityNone ServiceAffinity = "None"
)
// Service Type string describes ingress methods for a service
type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
// every node, in addition to 'ClusterIP' type.
ServiceTypeNodePort ServiceType = "NodePort"
// ServiceTypeLoadBalancer means a service will be exposed via an
// external load balancer (if the cloud provider supports it), in addition
// to 'NodePort' type.
ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
)
// ServiceStatus represents the current status of a service
type ServiceStatus struct{}
type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" description:"status of load-balancer"`
}
// LoadBalancerStatus represents the status of a load-balancer
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer;
// traffic intended for the service should be sent to these ingress points.
Ingress []LoadBalancerIngress `json:"ingress,omitempty" description:"load-balancer ingress points"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
IP string `json:"ip,omitempty" description:"IP address of ingress point"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
Hostname string `json:"hostname,omitempty" description:"hostname of ingress point"`
}
// ServiceSpec describes the attributes that a user creates on a service
type ServiceSpec struct {
@@ -1011,12 +1052,12 @@ type ServiceSpec struct {
// None can be specified for headless services when proxying is not required
PortalIP string `json:"portalIP,omitempty description: IP address of the service; usually assigned by the system; if specified, it will be allocated to the service if unused, and creation of the service will fail otherwise; cannot be updated; 'None' can be specified for a headless service when proxying is not required"`
// CreateExternalLoadBalancer indicates whether a load balancer should be created for this service.
CreateExternalLoadBalancer bool `json:"createExternalLoadBalancer,omitempty" description:"set up a cloud-provider-specific load balancer on an external IP"`
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"`
// PublicIPs are used by external load balancers, or can be set by
// Deprecated. PublicIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node.
PublicIPs []string `json:"publicIPs,omitempty" description:"externally visible IPs (e.g. load balancers) that should be proxied to this service"`
DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty" description:"deprecated. externally visible IPs (e.g. load balancers) that should be proxied to this service"`
// Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" description:"enable client IP based session affinity; must be ClientIP or None; defaults to None"`
@@ -1041,6 +1082,10 @@ type ServicePort struct {
// target Pod's container ports. If this is not specified, the value
// of Port is used (an identity map).
TargetPort util.IntOrString `json:"targetPort,omitempty" description:"the port to access on the pods targeted by the service; defaults to the service port"`
// The port on each node on which this service is exposed.
// Default is to auto-allocate a port if the ServiceType of this Service requires one.
NodePort int `json:"nodePort" description:"the port on each node on which this service is exposed"`
}
// Service is a named abstraction of software service (for example, mysql) consisting of local port

View File

@@ -774,19 +774,28 @@ func addConversionFuncs() {
Port: in.Spec.Ports[i].Port,
Protocol: Protocol(in.Spec.Ports[i].Protocol),
ContainerPort: in.Spec.Ports[i].TargetPort,
NodePort: in.Spec.Ports[i].NodePort,
})
}
if err := s.Convert(&in.Spec.Selector, &out.Selector, 0); err != nil {
return err
}
out.CreateExternalLoadBalancer = in.Spec.CreateExternalLoadBalancer
out.PublicIPs = in.Spec.PublicIPs
out.PublicIPs = in.Spec.DeprecatedPublicIPs
out.PortalIP = in.Spec.PortalIP
if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil {
return err
}
if err := s.Convert(&in.Status.LoadBalancer, &out.LoadBalancerStatus, 0); err != nil {
return err
}
if err := s.Convert(&in.Spec.Type, &out.Type, 0); err != nil {
return err
}
out.CreateExternalLoadBalancer = in.Spec.Type == api.ServiceTypeLoadBalancer
return nil
},
func(in *Service, out *api.Service, s conversion.Scope) error {
@@ -816,6 +825,7 @@ func addConversionFuncs() {
Port: in.Ports[i].Port,
Protocol: api.Protocol(in.Ports[i].Protocol),
TargetPort: in.Ports[i].ContainerPort,
NodePort: in.Ports[i].NodePort,
})
}
}
@@ -823,13 +833,28 @@ func addConversionFuncs() {
if err := s.Convert(&in.Selector, &out.Spec.Selector, 0); err != nil {
return err
}
out.Spec.CreateExternalLoadBalancer = in.CreateExternalLoadBalancer
out.Spec.PublicIPs = in.PublicIPs
out.Spec.DeprecatedPublicIPs = in.PublicIPs
out.Spec.PortalIP = in.PortalIP
if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil {
return err
}
if err := s.Convert(&in.LoadBalancerStatus, &out.Status.LoadBalancer, 0); err != nil {
return err
}
typeIn := in.Type
if typeIn == "" {
if in.CreateExternalLoadBalancer {
typeIn = ServiceTypeLoadBalancer
} else {
typeIn = ServiceTypeClusterIP
}
}
if err := s.Convert(&typeIn, &out.Spec.Type, 0); err != nil {
return err
}
return nil
},

View File

@@ -76,6 +76,15 @@ func addDefaultingFuncs() {
if obj.SessionAffinity == "" {
obj.SessionAffinity = ServiceAffinityNone
}
if obj.Type == "" {
if obj.CreateExternalLoadBalancer {
obj.Type = ServiceTypeLoadBalancer
} else {
obj.Type = ServiceTypeClusterIP
}
} else if obj.Type == ServiceTypeLoadBalancer {
obj.CreateExternalLoadBalancer = true
}
for i := range obj.Ports {
sp := &obj.Ports[i]
if sp.Protocol == "" {

View File

@@ -150,7 +150,20 @@ func TestSetDefaultService(t *testing.T) {
t.Errorf("Expected default protocol :%s, got: %s", versioned.ProtocolTCP, svc2.Protocol)
}
if svc2.SessionAffinity != versioned.ServiceAffinityNone {
t.Errorf("Expected default sesseion affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.SessionAffinity)
t.Errorf("Expected default session affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.SessionAffinity)
}
if svc2.Type != versioned.ServiceTypeClusterIP {
t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeClusterIP, svc2.Type)
}
}
// TestSetDefaultServiceWithLoadbalancer verifies backward compatibility of
// the new Type field: a service that sets only the legacy
// CreateExternalLoadBalancer flag must come out of an encode/decode round
// trip with Type defaulted to ServiceTypeLoadBalancer.
func TestSetDefaultServiceWithLoadbalancer(t *testing.T) {
	svc := &versioned.Service{}
	svc.CreateExternalLoadBalancer = true
	// roundTrip encodes and decodes the object, exercising the registered
	// defaulting functions.
	obj2 := roundTrip(t, runtime.Object(svc))
	svc2 := obj2.(*versioned.Service)
	if svc2.Type != versioned.ServiceTypeLoadBalancer {
		t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeLoadBalancer, svc2.Type)
	}
}

View File

@@ -835,6 +835,24 @@ const (
ServiceAffinityNone ServiceAffinity = "None"
)
// Service Type string describes ingress methods for a service
type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
// every node, in addition to 'ClusterIP' type.
ServiceTypeNodePort ServiceType = "NodePort"
// ServiceTypeLoadBalancer means a service will be exposed via an
// external load balancer (if the cloud provider supports it), in addition
// to 'NodePort' type.
ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
)
const (
// PortalIPNone - do not assign a portal IP
// no proxying required and no environment variables should be created for pods
@@ -873,9 +891,12 @@ type Service struct {
// An external load balancer should be set up via the cloud-provider
CreateExternalLoadBalancer bool `json:"createExternalLoadBalancer,omitempty" description:"set up a cloud-provider-specific load balancer on an external IP"`
// PublicIPs are used by external load balancers, or can be set by
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"`
// Deprecated. PublicIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node.
PublicIPs []string `json:"publicIPs,omitempty" description:"externally visible IPs (e.g. load balancers) that should be proxied to this service"`
PublicIPs []string `json:"publicIPs,omitempty" description:"deprecated. externally visible IPs (e.g. load balancers) that should be proxied to this service"`
// PortalIP is usually assigned by the master. If specified by the user
// we will try to respect it or else fail the request. This field can
@@ -896,6 +917,29 @@ type Service struct {
// array. If this field is not specified, it will be populated from
// the legacy fields.
Ports []ServicePort `json:"ports" description:"ports to be exposed on the service; if this field is specified, the legacy fields (Port, PortName, Protocol, and ContainerPort) will be overwritten by the first member of this array; if this field is not specified, it will be populated from the legacy fields"`
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
LoadBalancerStatus LoadBalancerStatus `json:"loadBalancerStatus,omitempty" description:"status of load-balancer"`
}
// LoadBalancerStatus represents the status of a load-balancer
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer;
// traffic intended for the service should be sent to these ingress points.
Ingress []LoadBalancerIngress `json:"ingress,omitempty" description:"load-balancer ingress points"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
IP string `json:"ip,omitempty" description:"IP address of ingress point"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
Hostname string `json:"hostname,omitempty" description:"hostname of ingress point"`
}
type ServicePort struct {
@@ -918,6 +962,10 @@ type ServicePort struct {
// of Port is used (an identity map) - note this is a different default
// than Service.ContainerPort.
ContainerPort util.IntOrString `json:"containerPort" description:"the port to access on the containers belonging to pods targeted by the service; defaults to the service port"`
// The port on each node on which this service is exposed.
// Default is to auto-allocate a port if the ServiceType of this Service requires one.
NodePort int `json:"nodePort" description:"the port on each node on which this service is exposed"`
}
// ServiceAccount binds together:

View File

@@ -696,19 +696,28 @@ func addConversionFuncs() {
Port: in.Spec.Ports[i].Port,
Protocol: Protocol(in.Spec.Ports[i].Protocol),
ContainerPort: in.Spec.Ports[i].TargetPort,
NodePort: in.Spec.Ports[i].NodePort,
})
}
if err := s.Convert(&in.Spec.Selector, &out.Selector, 0); err != nil {
return err
}
out.CreateExternalLoadBalancer = in.Spec.CreateExternalLoadBalancer
out.PublicIPs = in.Spec.PublicIPs
out.PublicIPs = in.Spec.DeprecatedPublicIPs
out.PortalIP = in.Spec.PortalIP
if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil {
return err
}
if err := s.Convert(&in.Status.LoadBalancer, &out.LoadBalancerStatus, 0); err != nil {
return err
}
if err := s.Convert(&in.Spec.Type, &out.Type, 0); err != nil {
return err
}
out.CreateExternalLoadBalancer = in.Spec.Type == api.ServiceTypeLoadBalancer
return nil
},
func(in *Service, out *api.Service, s conversion.Scope) error {
@@ -738,6 +747,7 @@ func addConversionFuncs() {
Port: in.Ports[i].Port,
Protocol: api.Protocol(in.Ports[i].Protocol),
TargetPort: in.Ports[i].ContainerPort,
NodePort: in.Ports[i].NodePort,
})
}
}
@@ -745,13 +755,28 @@ func addConversionFuncs() {
if err := s.Convert(&in.Selector, &out.Spec.Selector, 0); err != nil {
return err
}
out.Spec.CreateExternalLoadBalancer = in.CreateExternalLoadBalancer
out.Spec.PublicIPs = in.PublicIPs
out.Spec.DeprecatedPublicIPs = in.PublicIPs
out.Spec.PortalIP = in.PortalIP
if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil {
return err
}
if err := s.Convert(&in.LoadBalancerStatus, &out.Status.LoadBalancer, 0); err != nil {
return err
}
typeIn := in.Type
if typeIn == "" {
if in.CreateExternalLoadBalancer {
typeIn = ServiceTypeLoadBalancer
} else {
typeIn = ServiceTypeClusterIP
}
}
if err := s.Convert(&typeIn, &out.Spec.Type, 0); err != nil {
return err
}
return nil
},

View File

@@ -77,6 +77,15 @@ func addDefaultingFuncs() {
if obj.SessionAffinity == "" {
obj.SessionAffinity = ServiceAffinityNone
}
if obj.Type == "" {
if obj.CreateExternalLoadBalancer {
obj.Type = ServiceTypeLoadBalancer
} else {
obj.Type = ServiceTypeClusterIP
}
} else if obj.Type == ServiceTypeLoadBalancer {
obj.CreateExternalLoadBalancer = true
}
for i := range obj.Ports {
sp := &obj.Ports[i]
if sp.Protocol == "" {

View File

@@ -150,7 +150,20 @@ func TestSetDefaultService(t *testing.T) {
t.Errorf("Expected default protocol :%s, got: %s", versioned.ProtocolTCP, svc2.Protocol)
}
if svc2.SessionAffinity != versioned.ServiceAffinityNone {
t.Errorf("Expected default sesseion affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.SessionAffinity)
t.Errorf("Expected default session affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.SessionAffinity)
}
if svc2.Type != versioned.ServiceTypeClusterIP {
t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeClusterIP, svc2.Type)
}
}
func TestSetDefaultServiceWithLoadbalancer(t *testing.T) {
svc := &versioned.Service{}
svc.CreateExternalLoadBalancer = true
obj2 := roundTrip(t, runtime.Object(svc))
svc2 := obj2.(*versioned.Service)
if svc2.Type != versioned.ServiceTypeLoadBalancer {
t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeLoadBalancer, svc2.Type)
}
}

View File

@@ -837,6 +837,24 @@ const (
ServiceAffinityNone ServiceAffinity = "None"
)
// Service Type string describes ingress methods for a service
type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
// every node, in addition to 'ClusterIP' type.
ServiceTypeNodePort ServiceType = "NodePort"
// ServiceTypeLoadBalancer means a service will be exposed via an
// external load balancer (if the cloud provider supports it), in addition
// to 'NodePort' type.
ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
)
const (
// PortalIPNone - do not assign a portal IP
// no proxying required and no environment variables should be created for pods
@@ -877,9 +895,12 @@ type Service struct {
// An external load balancer should be set up via the cloud-provider
CreateExternalLoadBalancer bool `json:"createExternalLoadBalancer,omitempty" description:"set up a cloud-provider-specific load balancer on an external IP"`
// PublicIPs are used by external load balancers, or can be set by
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"`
// Deprecated. PublicIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node.
PublicIPs []string `json:"publicIPs,omitempty" description:"externally visible IPs (e.g. load balancers) that should be proxied to this service"`
PublicIPs []string `json:"publicIPs,omitempty" description:"deprecated. externally visible IPs (e.g. load balancers) that should be proxied to this service"`
// PortalIP is usually assigned by the master. If specified by the user
// we will try to respect it or else fail the request. This field can
@@ -900,6 +921,29 @@ type Service struct {
// array. If this field is not specified, it will be populated from
// the legacy fields.
Ports []ServicePort `json:"ports" description:"ports to be exposed on the service; if this field is specified, the legacy fields (Port, PortName, Protocol, and ContainerPort) will be overwritten by the first member of this array; if this field is not specified, it will be populated from the legacy fields"`
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
LoadBalancerStatus LoadBalancerStatus `json:"loadBalancerStatus,omitempty" description:"status of load-balancer"`
}
// LoadBalancerStatus represents the status of a load-balancer
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer;
// traffic intended for the service should be sent to these ingress points.
Ingress []LoadBalancerIngress `json:"ingress,omitempty" description:"load-balancer ingress points"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
IP string `json:"ip,omitempty" description:"IP address of ingress point"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
Hostname string `json:"hostname,omitempty" description:"hostname of ingress point"`
}
type ServicePort struct {
@@ -922,6 +966,10 @@ type ServicePort struct {
// of Port is used (an identity map) - note this is a different default
// than Service.ContainerPort.
ContainerPort util.IntOrString `json:"containerPort" description:"the port to access on the containers belonging to pods targeted by the service; defaults to the service port"`
// The port on each node on which this service is exposed.
// Default is to auto-allocate a port if the ServiceType of this Service requires one.
NodePort int `json:"nodePort" description:"the port on each node on which this service is exposed"`
}
// ServiceAccount binds together:

View File

@@ -29,6 +29,8 @@ func addConversionFuncs() {
err := api.Scheme.AddConversionFuncs(
convert_v1beta3_Container_To_api_Container,
convert_api_Container_To_v1beta3_Container,
convert_v1beta3_ServiceSpec_To_api_ServiceSpec,
convert_api_ServiceSpec_To_v1beta3_ServiceSpec,
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
@@ -329,3 +331,92 @@ func convert_api_Container_To_v1beta3_Container(in *api.Container, out *Containe
}
return nil
}
func convert_v1beta3_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*ServiceSpec))(in)
}
if in.Ports != nil {
out.Ports = make([]api.ServicePort, len(in.Ports))
for i := range in.Ports {
if err := convert_v1beta3_ServicePort_To_api_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
} else {
out.Ports = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
typeIn := in.Type
if typeIn == "" {
if in.CreateExternalLoadBalancer {
typeIn = ServiceTypeLoadBalancer
} else {
typeIn = ServiceTypeClusterIP
}
}
if err := s.Convert(&typeIn, &out.Type, 0); err != nil {
return err
}
if in.PublicIPs != nil {
out.DeprecatedPublicIPs = make([]string, len(in.PublicIPs))
for i := range in.PublicIPs {
out.DeprecatedPublicIPs[i] = in.PublicIPs[i]
}
} else {
out.DeprecatedPublicIPs = nil
}
out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity)
return nil
}
func convert_api_ServiceSpec_To_v1beta3_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ServiceSpec))(in)
}
if in.Ports != nil {
out.Ports = make([]ServicePort, len(in.Ports))
for i := range in.Ports {
if err := convert_api_ServicePort_To_v1beta3_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
} else {
out.Ports = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
if err := s.Convert(&in.Type, &out.Type, 0); err != nil {
return err
}
out.CreateExternalLoadBalancer = in.Type == api.ServiceTypeLoadBalancer
if in.DeprecatedPublicIPs != nil {
out.PublicIPs = make([]string, len(in.DeprecatedPublicIPs))
for i := range in.DeprecatedPublicIPs {
out.PublicIPs[i] = in.DeprecatedPublicIPs[i]
}
} else {
out.PublicIPs = nil
}
out.SessionAffinity = ServiceAffinity(in.SessionAffinity)
return nil
}

View File

@@ -719,6 +719,32 @@ func convert_api_ListOptions_To_v1beta3_ListOptions(in *api.ListOptions, out *Li
return nil
}
func convert_api_LoadBalancerIngress_To_v1beta3_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.LoadBalancerIngress))(in)
}
out.IP = in.IP
out.Hostname = in.Hostname
return nil
}
func convert_api_LoadBalancerStatus_To_v1beta3_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.LoadBalancerStatus))(in)
}
if in.Ingress != nil {
out.Ingress = make([]LoadBalancerIngress, len(in.Ingress))
for i := range in.Ingress {
if err := convert_api_LoadBalancerIngress_To_v1beta3_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil {
return err
}
}
} else {
out.Ingress = nil
}
return nil
}
func convert_api_LocalObjectReference_To_v1beta3_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.LocalObjectReference))(in)
@@ -1984,42 +2010,7 @@ func convert_api_ServicePort_To_v1beta3_ServicePort(in *api.ServicePort, out *Se
if err := s.Convert(&in.TargetPort, &out.TargetPort, 0); err != nil {
return err
}
return nil
}
func convert_api_ServiceSpec_To_v1beta3_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ServiceSpec))(in)
}
if in.Ports != nil {
out.Ports = make([]ServicePort, len(in.Ports))
for i := range in.Ports {
if err := convert_api_ServicePort_To_v1beta3_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
} else {
out.Ports = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.CreateExternalLoadBalancer = in.CreateExternalLoadBalancer
if in.PublicIPs != nil {
out.PublicIPs = make([]string, len(in.PublicIPs))
for i := range in.PublicIPs {
out.PublicIPs[i] = in.PublicIPs[i]
}
} else {
out.PublicIPs = nil
}
out.SessionAffinity = ServiceAffinity(in.SessionAffinity)
out.NodePort = in.NodePort
return nil
}
@@ -2027,6 +2018,9 @@ func convert_api_ServiceStatus_To_v1beta3_ServiceStatus(in *api.ServiceStatus, o
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ServiceStatus))(in)
}
if err := convert_api_LoadBalancerStatus_To_v1beta3_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
@@ -2914,6 +2908,32 @@ func convert_v1beta3_ListOptions_To_api_ListOptions(in *ListOptions, out *api.Li
return nil
}
func convert_v1beta3_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*LoadBalancerIngress))(in)
}
out.IP = in.IP
out.Hostname = in.Hostname
return nil
}
func convert_v1beta3_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*LoadBalancerStatus))(in)
}
if in.Ingress != nil {
out.Ingress = make([]api.LoadBalancerIngress, len(in.Ingress))
for i := range in.Ingress {
if err := convert_v1beta3_LoadBalancerIngress_To_api_LoadBalancerIngress(&in.Ingress[i], &out.Ingress[i], s); err != nil {
return err
}
}
} else {
out.Ingress = nil
}
return nil
}
func convert_v1beta3_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*LocalObjectReference))(in)
@@ -4179,42 +4199,7 @@ func convert_v1beta3_ServicePort_To_api_ServicePort(in *ServicePort, out *api.Se
if err := s.Convert(&in.TargetPort, &out.TargetPort, 0); err != nil {
return err
}
return nil
}
func convert_v1beta3_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*ServiceSpec))(in)
}
if in.Ports != nil {
out.Ports = make([]api.ServicePort, len(in.Ports))
for i := range in.Ports {
if err := convert_v1beta3_ServicePort_To_api_ServicePort(&in.Ports[i], &out.Ports[i], s); err != nil {
return err
}
}
} else {
out.Ports = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
out.PortalIP = in.PortalIP
out.CreateExternalLoadBalancer = in.CreateExternalLoadBalancer
if in.PublicIPs != nil {
out.PublicIPs = make([]string, len(in.PublicIPs))
for i := range in.PublicIPs {
out.PublicIPs[i] = in.PublicIPs[i]
}
} else {
out.PublicIPs = nil
}
out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity)
out.NodePort = in.NodePort
return nil
}
@@ -4222,6 +4207,9 @@ func convert_v1beta3_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*ServiceStatus))(in)
}
if err := convert_v1beta3_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil {
return err
}
return nil
}
@@ -4457,6 +4445,8 @@ func init() {
convert_api_ListMeta_To_v1beta3_ListMeta,
convert_api_ListOptions_To_v1beta3_ListOptions,
convert_api_List_To_v1beta3_List,
convert_api_LoadBalancerIngress_To_v1beta3_LoadBalancerIngress,
convert_api_LoadBalancerStatus_To_v1beta3_LoadBalancerStatus,
convert_api_LocalObjectReference_To_v1beta3_LocalObjectReference,
convert_api_NFSVolumeSource_To_v1beta3_NFSVolumeSource,
convert_api_NamespaceList_To_v1beta3_NamespaceList,
@@ -4517,7 +4507,6 @@ func init() {
convert_api_ServiceAccount_To_v1beta3_ServiceAccount,
convert_api_ServiceList_To_v1beta3_ServiceList,
convert_api_ServicePort_To_v1beta3_ServicePort,
convert_api_ServiceSpec_To_v1beta3_ServiceSpec,
convert_api_ServiceStatus_To_v1beta3_ServiceStatus,
convert_api_Service_To_v1beta3_Service,
convert_api_StatusCause_To_v1beta3_StatusCause,
@@ -4568,6 +4557,8 @@ func init() {
convert_v1beta3_ListMeta_To_api_ListMeta,
convert_v1beta3_ListOptions_To_api_ListOptions,
convert_v1beta3_List_To_api_List,
convert_v1beta3_LoadBalancerIngress_To_api_LoadBalancerIngress,
convert_v1beta3_LoadBalancerStatus_To_api_LoadBalancerStatus,
convert_v1beta3_LocalObjectReference_To_api_LocalObjectReference,
convert_v1beta3_NFSVolumeSource_To_api_NFSVolumeSource,
convert_v1beta3_NamespaceList_To_api_NamespaceList,
@@ -4628,7 +4619,6 @@ func init() {
convert_v1beta3_ServiceAccount_To_api_ServiceAccount,
convert_v1beta3_ServiceList_To_api_ServiceList,
convert_v1beta3_ServicePort_To_api_ServicePort,
convert_v1beta3_ServiceSpec_To_api_ServiceSpec,
convert_v1beta3_ServiceStatus_To_api_ServiceStatus,
convert_v1beta3_Service_To_api_Service,
convert_v1beta3_StatusCause_To_api_StatusCause,

View File

@@ -73,6 +73,15 @@ func addDefaultingFuncs() {
if obj.SessionAffinity == "" {
obj.SessionAffinity = ServiceAffinityNone
}
if obj.Type == "" {
if obj.CreateExternalLoadBalancer {
obj.Type = ServiceTypeLoadBalancer
} else {
obj.Type = ServiceTypeClusterIP
}
} else if obj.Type == ServiceTypeLoadBalancer {
obj.CreateExternalLoadBalancer = true
}
for i := range obj.Ports {
sp := &obj.Ports[i]
if sp.Protocol == "" {

View File

@@ -160,7 +160,20 @@ func TestSetDefaultService(t *testing.T) {
obj2 := roundTrip(t, runtime.Object(svc))
svc2 := obj2.(*versioned.Service)
if svc2.Spec.SessionAffinity != versioned.ServiceAffinityNone {
t.Errorf("Expected default sesseion affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.Spec.SessionAffinity)
t.Errorf("Expected default session affinity type:%s, got: %s", versioned.ServiceAffinityNone, svc2.Spec.SessionAffinity)
}
if svc2.Spec.Type != versioned.ServiceTypeClusterIP {
t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeClusterIP, svc2.Spec.Type)
}
}
func TestSetDefaultServiceWithLoadbalancer(t *testing.T) {
svc := &versioned.Service{}
svc.Spec.CreateExternalLoadBalancer = true
obj2 := roundTrip(t, runtime.Object(svc))
svc2 := obj2.(*versioned.Service)
if svc2.Spec.Type != versioned.ServiceTypeLoadBalancer {
t.Errorf("Expected default type:%s, got: %s", versioned.ServiceTypeLoadBalancer, svc2.Spec.Type)
}
}

View File

@@ -997,8 +997,49 @@ const (
ServiceAffinityNone ServiceAffinity = "None"
)
// Service Type string describes ingress methods for a service
type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the portal IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
// every node, in addition to 'ClusterIP' type.
ServiceTypeNodePort ServiceType = "NodePort"
// ServiceTypeLoadBalancer means a service will be exposed via an
// external load balancer (if the cloud provider supports it), in addition
// to 'NodePort' type.
ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
)
// ServiceStatus represents the current status of a service
type ServiceStatus struct{}
type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" description:"status of load-balancer"`
}
// LoadBalancerStatus represents the status of a load-balancer
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer;
// traffic intended for the service should be sent to these ingress points.
Ingress []LoadBalancerIngress `json:"ingress,omitempty" description:"load-balancer ingress points"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
IP string `json:"ip,omitempty" description:"IP address of ingress point"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
Hostname string `json:"hostname,omitempty" description:"hostname of ingress point"`
}
// ServiceSpec describes the attributes that a user creates on a service
type ServiceSpec struct {
@@ -1018,9 +1059,12 @@ type ServiceSpec struct {
// CreateExternalLoadBalancer indicates whether a load balancer should be created for this service.
CreateExternalLoadBalancer bool `json:"createExternalLoadBalancer,omitempty" description:"set up a cloud-provider-specific load balancer on an external IP"`
// PublicIPs are used by external load balancers, or can be set by
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"`
// Deprecated. PublicIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node.
PublicIPs []string `json:"publicIPs,omitempty" description:"externally visible IPs (e.g. load balancers) that should be proxied to this service"`
PublicIPs []string `json:"publicIPs,omitempty" description:"deprecated. externally visible IPs (e.g. load balancers) that should be proxied to this service"`
// Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" description:"enable client IP based session affinity; must be ClientIP or None; defaults to None"`
@@ -1045,6 +1089,10 @@ type ServicePort struct {
// target Pod's container ports. If this is not specified, the value
// of Port is used (an identity map).
TargetPort util.IntOrString `json:"targetPort,omitempty" description:"the port to access on the pods targeted by the service; defaults to the service port"`
// The port on each node on which this service is exposed.
// Default is to auto-allocate a port if the ServiceType of this Service requires one.
NodePort int `json:"nodePort" description:"the port on each node on which this service is exposed"`
}
// Service is a named abstraction of software service (for example, mysql) consisting of local port

View File

@@ -1032,6 +1032,8 @@ func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) errs.ValidationE
}
var supportedSessionAffinityType = util.NewStringSet(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone))
var supportedServiceType = util.NewStringSet(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort),
string(api.ServiceTypeLoadBalancer))
// ValidateService tests if required fields in the service are set.
func ValidateService(service *api.Service) errs.ValidationErrorList {
@@ -1062,7 +1064,7 @@ func ValidateService(service *api.Service) errs.ValidationErrorList {
}
}
for _, ip := range service.Spec.PublicIPs {
for _, ip := range service.Spec.DeprecatedPublicIPs {
if ip == "0.0.0.0" {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.publicIPs", ip, "is not an IP address"))
} else if util.IsValidIPv4(ip) && net.ParseIP(ip).IsLoopback() {
@@ -1070,14 +1072,45 @@ func ValidateService(service *api.Service) errs.ValidationErrorList {
}
}
if service.Spec.CreateExternalLoadBalancer {
if service.Spec.Type == "" {
allErrs = append(allErrs, errs.NewFieldRequired("spec.type"))
} else if !supportedServiceType.Has(string(service.Spec.Type)) {
allErrs = append(allErrs, errs.NewFieldNotSupported("spec.type", service.Spec.Type))
}
if service.Spec.Type == api.ServiceTypeLoadBalancer {
for i := range service.Spec.Ports {
if service.Spec.Ports[i].Protocol != api.ProtocolTCP {
allErrs = append(allErrs, errs.NewFieldInvalid("spec.ports", service.Spec.Ports[i], "cannot create an external load balancer with non-TCP ports"))
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.ports[%d].protocol", i), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP ports"))
}
}
}
if service.Spec.Type == api.ServiceTypeClusterIP {
for i := range service.Spec.Ports {
if service.Spec.Ports[i].NodePort != 0 {
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.ports[%d].nodePort", i), service.Spec.Ports[i].NodePort, "cannot specify a node port with services of type ClusterIP"))
}
}
}
// Check for duplicate NodePorts, considering (protocol,port) pairs
nodePorts := make(map[api.ServicePort]bool)
for i := range service.Spec.Ports {
port := &service.Spec.Ports[i]
if port.NodePort == 0 {
continue
}
var key api.ServicePort
key.Protocol = port.Protocol
key.NodePort = port.NodePort
_, found := nodePorts[key]
if found {
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.ports[%d].nodePort", i), port.NodePort, "duplicate nodePort specified"))
}
nodePorts[key] = true
}
return allErrs
}
@@ -1343,7 +1376,7 @@ func ValidateSecret(secret *api.Secret) errs.ValidationErrorList {
allErrs = append(allErrs, errs.NewFieldRequired(fmt.Sprintf("metadata.annotations[%s]", api.ServiceAccountNameKey)))
}
case api.SecretTypeOpaque, "":
// no-op
// no-op
case api.SecretTypeDockercfg:
dockercfgBytes, exists := secret.Data[api.DockerConfigKey]
if !exists {

View File

@@ -1427,6 +1427,7 @@ func makeValidService() api.Service {
Spec: api.ServiceSpec{
Selector: map[string]string{"key": "val"},
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{Name: "p", Protocol: "TCP", Port: 8675}},
},
}
@@ -1522,6 +1523,13 @@ func TestValidateService(t *testing.T) {
},
numErrs: 1,
},
{
name: "missing type",
tweakSvc: func(s *api.Service) {
s.Spec.Type = ""
},
numErrs: 1,
},
{
name: "missing ports",
tweakSvc: func(s *api.Service) {
@@ -1603,21 +1611,21 @@ func TestValidateService(t *testing.T) {
{
name: "invalid publicIPs localhost",
tweakSvc: func(s *api.Service) {
s.Spec.PublicIPs = []string{"127.0.0.1"}
s.Spec.DeprecatedPublicIPs = []string{"127.0.0.1"}
},
numErrs: 1,
},
{
name: "invalid publicIPs",
tweakSvc: func(s *api.Service) {
s.Spec.PublicIPs = []string{"0.0.0.0"}
s.Spec.DeprecatedPublicIPs = []string{"0.0.0.0"}
},
numErrs: 1,
},
{
name: "valid publicIPs host",
tweakSvc: func(s *api.Service) {
s.Spec.PublicIPs = []string{"myhost.mydomain"}
s.Spec.DeprecatedPublicIPs = []string{"myhost.mydomain"}
},
numErrs: 0,
},
@@ -1632,7 +1640,7 @@ func TestValidateService(t *testing.T) {
{
name: "invalid load balancer protocol 1",
tweakSvc: func(s *api.Service) {
s.Spec.CreateExternalLoadBalancer = true
s.Spec.Type = api.ServiceTypeLoadBalancer
s.Spec.Ports[0].Protocol = "UDP"
},
numErrs: 1,
@@ -1640,7 +1648,7 @@ func TestValidateService(t *testing.T) {
{
name: "invalid load balancer protocol 2",
tweakSvc: func(s *api.Service) {
s.Spec.CreateExternalLoadBalancer = true
s.Spec.Type = api.ServiceTypeLoadBalancer
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "UDP"})
},
numErrs: 1,
@@ -1683,16 +1691,135 @@ func TestValidateService(t *testing.T) {
numErrs: 0,
},
{
name: "valid external load balancer",
name: "valid type - cluster",
tweakSvc: func(s *api.Service) {
s.Spec.CreateExternalLoadBalancer = true
s.Spec.Type = api.ServiceTypeClusterIP
},
numErrs: 0,
},
{
name: "valid type - loadbalancer",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
},
numErrs: 0,
},
{
name: "valid type loadbalancer 2 ports",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP"})
},
numErrs: 0,
},
{
name: "valid external load balancer 2 ports",
tweakSvc: func(s *api.Service) {
s.Spec.CreateExternalLoadBalancer = true
s.Spec.Type = api.ServiceTypeLoadBalancer
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP"})
},
numErrs: 0,
},
{
name: "duplicate nodeports",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", NodePort: 1})
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "r", Port: 2, Protocol: "TCP", NodePort: 1})
},
numErrs: 1,
},
{
name: "duplicate nodeports (different protocols)",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 1, Protocol: "TCP", NodePort: 1})
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "r", Port: 2, Protocol: "UDP", NodePort: 1})
},
numErrs: 0,
},
{
name: "valid type - cluster",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeClusterIP
},
numErrs: 0,
},
{
name: "valid type - nodeport",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
},
numErrs: 0,
},
{
name: "valid type - loadbalancer",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
},
numErrs: 0,
},
{
name: "valid type loadbalancer 2 ports",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP"})
},
numErrs: 0,
},
{
name: "valid type loadbalancer with NodePort",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", NodePort: 12345})
},
numErrs: 0,
},
{
name: "valid type=NodePort service with NodePort",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", NodePort: 12345})
},
numErrs: 0,
},
{
name: "valid type=NodePort service without NodePort",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP"})
},
numErrs: 0,
},
{
name: "valid cluster service without NodePort",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeClusterIP
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP"})
},
numErrs: 0,
},
{
name: "invalid cluster service with NodePort",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeClusterIP
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP", NodePort: 12345})
},
numErrs: 1,
},
{
name: "invalid public service with duplicate NodePort",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeNodePort
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "p1", Port: 1, Protocol: "TCP", NodePort: 1})
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "p2", Port: 2, Protocol: "TCP", NodePort: 1})
},
numErrs: 1,
},
{
name: "valid type=LoadBalancer",
tweakSvc: func(s *api.Service) {
s.Spec.Type = api.ServiceTypeLoadBalancer
s.Spec.Ports = append(s.Spec.Ports, api.ServicePort{Name: "q", Port: 12345, Protocol: "TCP"})
},
numErrs: 0,
@@ -2458,6 +2585,27 @@ func TestValidateServiceUpdate(t *testing.T) {
},
numErrs: 1,
},
{
name: "change type",
tweakSvc: func(oldSvc, newSvc *api.Service) {
newSvc.Spec.Type = api.ServiceTypeLoadBalancer
},
numErrs: 0,
},
{
name: "remove type",
tweakSvc: func(oldSvc, newSvc *api.Service) {
newSvc.Spec.Type = ""
},
numErrs: 1,
},
{
name: "change type -> nodeport",
tweakSvc: func(oldSvc, newSvc *api.Service) {
newSvc.Spec.Type = api.ServiceTypeNodePort
},
numErrs: 0,
},
}
for _, tc := range testCases {

View File

@@ -63,10 +63,10 @@ func GetLoadBalancerName(service *api.Service) string {
type TCPLoadBalancer interface {
// TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service
// GetTCPLoadBalancer returns whether the specified load balancer exists, and
// if so, what its IP address or hostname is.
GetTCPLoadBalancer(name, region string) (endpoint string, exists bool, err error)
// CreateTCPLoadBalancer creates a new tcp load balancer. Returns the IP address or hostname of the balancer
CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinityType api.ServiceAffinity) (string, error)
// if so, what its status is.
GetTCPLoadBalancer(name, region string) (status *api.LoadBalancerStatus, exists bool, err error)
// CreateTCPLoadBalancer creates a new tcp load balancer. Returns the status of the balancer
CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinityType api.ServiceAffinity) (*api.LoadBalancerStatus, error)
// UpdateTCPLoadBalancer updates hosts under the specified load balancer.
UpdateTCPLoadBalancer(name, region string, hosts []string) error
// EnsureTCPLoadBalancerDeleted deletes the specified load balancer if it

View File

@@ -103,16 +103,23 @@ func (f *FakeCloud) Routes() (cloudprovider.Routes, bool) {
}
// GetTCPLoadBalancer is a stub implementation of TCPLoadBalancer.GetTCPLoadBalancer.
func (f *FakeCloud) GetTCPLoadBalancer(name, region string) (endpoint string, exists bool, err error) {
return f.ExternalIP.String(), f.Exists, f.Err
func (f *FakeCloud) GetTCPLoadBalancer(name, region string) (*api.LoadBalancerStatus, bool, error) {
status := &api.LoadBalancerStatus{}
status.Ingress = []api.LoadBalancerIngress{{IP: f.ExternalIP.String()}}
return status, f.Exists, f.Err
}
// CreateTCPLoadBalancer is a test-spy implementation of TCPLoadBalancer.CreateTCPLoadBalancer.
// It adds an entry "create" into the internal method call record.
func (f *FakeCloud) CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinityType api.ServiceAffinity) (string, error) {
func (f *FakeCloud) CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinityType api.ServiceAffinity) (*api.LoadBalancerStatus, error) {
f.addCall("create")
f.Balancers = append(f.Balancers, FakeBalancer{name, region, externalIP, ports, hosts})
return f.ExternalIP.String(), f.Err
status := &api.LoadBalancerStatus{}
status.Ingress = []api.LoadBalancerIngress{{IP: f.ExternalIP.String()}}
return status, f.Err
}
// UpdateTCPLoadBalancer is a test-spy implementation of TCPLoadBalancer.UpdateTCPLoadBalancer.

View File

@@ -282,15 +282,18 @@ func (gce *GCECloud) waitForZoneOp(op *compute.Operation) error {
}
// GetTCPLoadBalancer is an implementation of TCPLoadBalancer.GetTCPLoadBalancer
func (gce *GCECloud) GetTCPLoadBalancer(name, region string) (endpoint string, exists bool, err error) {
fw, err := gce.service.ForwardingRules.Get(gce.projectID, region, name).Do()
func (gce *GCECloud) GetTCPLoadBalancer(name, region string) (*api.LoadBalancerStatus, bool, error) {
fwd, err := gce.service.ForwardingRules.Get(gce.projectID, region, name).Do()
if err == nil {
return fw.IPAddress, true, nil
status := &api.LoadBalancerStatus{}
status.Ingress = []api.LoadBalancerIngress{{IP: fwd.IPAddress}}
return status, true, nil
}
if isHTTPErrorCode(err, http.StatusNotFound) {
return "", false, nil
return nil, false, nil
}
return "", false, err
return nil, false, err
}
func isHTTPErrorCode(err error, code int) bool {
@@ -314,17 +317,17 @@ func translateAffinityType(affinityType api.ServiceAffinity) GCEAffinityType {
// CreateTCPLoadBalancer is an implementation of TCPLoadBalancer.CreateTCPLoadBalancer.
// TODO(a-robinson): Don't just ignore specified IP addresses. Check if they're
// owned by the project and available to be used, and use them if they are.
func (gce *GCECloud) CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinityType api.ServiceAffinity) (string, error) {
func (gce *GCECloud) CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinityType api.ServiceAffinity) (*api.LoadBalancerStatus, error) {
err := gce.makeTargetPool(name, region, hosts, translateAffinityType(affinityType))
if err != nil {
if !isHTTPErrorCode(err, http.StatusConflict) {
return "", err
return nil, err
}
glog.Infof("Creating forwarding rule pointing at target pool that already exists: %v", err)
}
if len(ports) == 0 {
return "", fmt.Errorf("no ports specified for GCE load balancer")
return nil, fmt.Errorf("no ports specified for GCE load balancer")
}
minPort := 65536
maxPort := 0
@@ -344,19 +347,22 @@ func (gce *GCECloud) CreateTCPLoadBalancer(name, region string, externalIP net.I
}
op, err := gce.service.ForwardingRules.Insert(gce.projectID, region, req).Do()
if err != nil && !isHTTPErrorCode(err, http.StatusConflict) {
return "", err
return nil, err
}
if op != nil {
err = gce.waitForRegionOp(op, region)
if err != nil && !isHTTPErrorCode(err, http.StatusConflict) {
return "", err
return nil, err
}
}
fwd, err := gce.service.ForwardingRules.Get(gce.projectID, region, name).Do()
if err != nil {
return "", err
return nil, err
}
return fwd.IPAddress, nil
status := &api.LoadBalancerStatus{}
status.Ingress = []api.LoadBalancerIngress{{IP: fwd.IPAddress}}
return status, nil
}
// UpdateTCPLoadBalancer is an implementation of TCPLoadBalancer.UpdateTCPLoadBalancer.

View File

@@ -457,15 +457,19 @@ func getVipByName(client *gophercloud.ServiceClient, name string) (*vips.Virtual
return &vipList[0], nil
}
func (lb *LoadBalancer) GetTCPLoadBalancer(name, region string) (endpoint string, exists bool, err error) {
func (lb *LoadBalancer) GetTCPLoadBalancer(name, region string) (*api.LoadBalancerStatus, bool, error) {
vip, err := getVipByName(lb.network, name)
if err == ErrNotFound {
return "", false, nil
return nil, false, nil
}
if vip == nil {
return "", false, err
return nil, false, err
}
return vip.Address, true, err
status := &api.LoadBalancerStatus{}
status.Ingress = []api.LoadBalancerIngress{{IP: vip.Address}}
return status, true, err
}
// TODO: This code currently ignores 'region' and always creates a
@@ -473,11 +477,11 @@ func (lb *LoadBalancer) GetTCPLoadBalancer(name, region string) (endpoint string
// a list of regions (from config) and query/create loadbalancers in
// each region.
func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinity api.ServiceAffinity) (string, error) {
func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP net.IP, ports []int, hosts []string, affinity api.ServiceAffinity) (*api.LoadBalancerStatus, error) {
glog.V(4).Infof("CreateTCPLoadBalancer(%v, %v, %v, %v, %v, %v)", name, region, externalIP, ports, hosts, affinity)
if len(ports) > 1 {
return "", fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
return nil, fmt.Errorf("multiple ports are not yet supported in openstack load balancers")
}
var persistence *vips.SessionPersistence
@@ -487,7 +491,7 @@ func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP ne
case api.ServiceAffinityClientIP:
persistence = &vips.SessionPersistence{Type: "SOURCE_IP"}
default:
return "", fmt.Errorf("unsupported load balancer affinity: %v", affinity)
return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
}
lbmethod := lb.opts.LBMethod
@@ -501,13 +505,13 @@ func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP ne
LBMethod: lbmethod,
}).Extract()
if err != nil {
return "", err
return nil, err
}
for _, host := range hosts {
addr, err := getAddressByName(lb.compute, host)
if err != nil {
return "", err
return nil, err
}
_, err = members.Create(lb.network, members.CreateOpts{
@@ -517,7 +521,7 @@ func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP ne
}).Extract()
if err != nil {
pools.Delete(lb.network, pool.ID)
return "", err
return nil, err
}
}
@@ -531,14 +535,14 @@ func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP ne
}).Extract()
if err != nil {
pools.Delete(lb.network, pool.ID)
return "", err
return nil, err
}
_, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract()
if err != nil {
monitors.Delete(lb.network, mon.ID)
pools.Delete(lb.network, pool.ID)
return "", err
return nil, err
}
}
@@ -556,10 +560,13 @@ func (lb *LoadBalancer) CreateTCPLoadBalancer(name, region string, externalIP ne
monitors.Delete(lb.network, mon.ID)
}
pools.Delete(lb.network, pool.ID)
return "", err
return nil, err
}
return vip.Address, nil
status := &api.LoadBalancerStatus{}
status.Ingress = []api.LoadBalancerIngress{{IP: vip.Address}}
return status, nil
}
func (lb *LoadBalancer) UpdateTCPLoadBalancer(name, region string, hosts []string) error {

View File

@@ -231,7 +231,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
if cachedService != nil {
// If the service already exists but needs to be updated, delete it so that
// we can recreate it cleanly.
if cachedService.Spec.CreateExternalLoadBalancer {
if wantsExternalLoadBalancer(cachedService) {
glog.Infof("Deleting existing load balancer for service %s that needs an updated load balancer.", namespacedName)
if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(cachedService), s.zone.Region); err != nil {
return err, retryable
@@ -240,49 +240,49 @@ func (s *ServiceController) createLoadBalancerIfNeeded(namespacedName types.Name
} else {
// If we don't have any cached memory of the load balancer, we have to ask
// the cloud provider for what it knows about it.
endpoint, exists, err := s.balancer.GetTCPLoadBalancer(s.loadBalancerName(service), s.zone.Region)
status, exists, err := s.balancer.GetTCPLoadBalancer(s.loadBalancerName(service), s.zone.Region)
if err != nil {
return fmt.Errorf("Error getting LB for service %s", namespacedName), retryable
return fmt.Errorf("Error getting LB for service %s: %v", namespacedName, err), retryable
}
if exists && stringSlicesEqual(service.Spec.PublicIPs, []string{endpoint}) {
// TODO: If we could read more of the spec (ports, affinityType) of the
// existing load balancer, we could better determine if an update is
// necessary in more cases. For now, we optimistically assume that a
// matching IP suffices.
glog.Infof("LB already exists with endpoint %s for previously uncached service %s", endpoint, namespacedName)
if exists && api.LoadBalancerStatusEqual(status, &service.Status.LoadBalancer) {
glog.Infof("LB already exists with status %s for previously uncached service %s", status, namespacedName)
return nil, notRetryable
} else if exists {
glog.Infof("Deleting old LB for previously uncached service %s whose endpoint %s doesn't match the service's desired IPs %v",
namespacedName, endpoint, service.Spec.PublicIPs)
namespacedName, status, service.Spec.DeprecatedPublicIPs)
if err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region); err != nil {
return err, retryable
}
}
}
if !service.Spec.CreateExternalLoadBalancer {
// Save the state so we can avoid a write if it doesn't change
previousState := api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
if !wantsExternalLoadBalancer(service) {
glog.Infof("Not creating LB for service %s that doesn't want one.", namespacedName)
return nil, notRetryable
service.Status.LoadBalancer = api.LoadBalancerStatus{}
} else {
glog.V(2).Infof("Creating LB for service %s", namespacedName)
// The load balancer doesn't exist yet, so create it.
err := s.createExternalLoadBalancer(service)
if err != nil {
return fmt.Errorf("failed to create external load balancer for service %s: %v", namespacedName, err), retryable
}
}
glog.V(2).Infof("Creating LB for service %s", namespacedName)
// The load balancer doesn't exist yet, so create it.
publicIPstring := fmt.Sprint(service.Spec.PublicIPs)
err := s.createExternalLoadBalancer(service)
if err != nil {
return fmt.Errorf("failed to create external load balancer for service %s: %v", namespacedName, err), retryable
// Write the state if changed
// TODO: Be careful here ... what if there were other changes to the service?
if !api.LoadBalancerStatusEqual(previousState, &service.Status.LoadBalancer) {
if err := s.persistUpdate(service); err != nil {
return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable
}
} else {
glog.Infof("Not persisting unchanged LoadBalancerStatus to registry.")
}
if publicIPstring == fmt.Sprint(service.Spec.PublicIPs) {
glog.Infof("Not persisting unchanged service to registry.")
return nil, notRetryable
}
// If creating the load balancer succeeded, persist the updated service.
if err = s.persistUpdate(service); err != nil {
return fmt.Errorf("Failed to persist updated publicIPs to apiserver, even after retries. Giving up: %v", err), notRetryable
}
return nil, notRetryable
}
@@ -301,13 +301,13 @@ func (s *ServiceController) persistUpdate(service *api.Service) error {
return nil
}
// TODO: Try to resolve the conflict if the change was unrelated to load
// balancers and public IPs. For now, just rely on the fact that we'll
// balancer status. For now, just rely on the fact that we'll
// also process the update that caused the resource version to change.
if errors.IsConflict(err) {
glog.Infof("Not persisting update to service that has been changed since we received it: %v", err)
return nil
}
glog.Warningf("Failed to persist updated PublicIPs to service %s after creating its external load balancer: %v",
glog.Warningf("Failed to persist updated LoadBalancerStatus to service %s after creating its external load balancer: %v",
service.Name, err)
time.Sleep(clientRetryInterval)
}
@@ -324,25 +324,27 @@ func (s *ServiceController) createExternalLoadBalancer(service *api.Service) err
return err
}
name := s.loadBalancerName(service)
if len(service.Spec.PublicIPs) > 0 {
for _, publicIP := range service.Spec.PublicIPs {
if len(service.Spec.DeprecatedPublicIPs) > 0 {
for _, publicIP := range service.Spec.DeprecatedPublicIPs {
// TODO: Make this actually work for multiple IPs by using different
// names for each. For now, we'll just create the first and break.
endpoint, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP),
status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP),
ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
if err != nil {
return err
} else {
service.Status.LoadBalancer = *status
}
service.Spec.PublicIPs = []string{endpoint}
break
}
} else {
endpoint, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, nil,
status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, nil,
ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
if err != nil {
return err
} else {
service.Status.LoadBalancer = *status
}
service.Spec.PublicIPs = []string{endpoint}
}
return nil
}
@@ -402,20 +404,20 @@ func (s *serviceCache) delete(serviceName string) {
}
func needsUpdate(oldService *api.Service, newService *api.Service) bool {
if !oldService.Spec.CreateExternalLoadBalancer && !newService.Spec.CreateExternalLoadBalancer {
if !wantsExternalLoadBalancer(oldService) && !wantsExternalLoadBalancer(newService) {
return false
}
if oldService.Spec.CreateExternalLoadBalancer != newService.Spec.CreateExternalLoadBalancer {
if wantsExternalLoadBalancer(oldService) != wantsExternalLoadBalancer(newService) {
return true
}
if !portsEqual(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity {
return true
}
if len(oldService.Spec.PublicIPs) != len(newService.Spec.PublicIPs) {
if len(oldService.Spec.DeprecatedPublicIPs) != len(newService.Spec.DeprecatedPublicIPs) {
return true
}
for i := range oldService.Spec.PublicIPs {
if oldService.Spec.PublicIPs[i] != newService.Spec.PublicIPs[i] {
for i := range oldService.Spec.DeprecatedPublicIPs {
if oldService.Spec.DeprecatedPublicIPs[i] != newService.Spec.DeprecatedPublicIPs[i] {
return true
}
}
@@ -559,7 +561,7 @@ func (s *ServiceController) updateLoadBalancerHosts(services []*cachedService, h
// Updates the external load balancer of a service, assuming we hold the mutex
// associated with the service.
func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service, hosts []string) error {
if !service.Spec.CreateExternalLoadBalancer {
if !wantsExternalLoadBalancer(service) {
return nil
}
@@ -577,3 +579,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
}
return err
}
// wantsExternalLoadBalancer reports whether the service asks for a
// cloud-provider load balancer, i.e. its type is ServiceTypeLoadBalancer.
func wantsExternalLoadBalancer(service *api.Service) bool {
	switch service.Spec.Type {
	case api.ServiceTypeLoadBalancer:
		return true
	default:
		return false
	}
}

View File

@@ -28,8 +28,8 @@ import (
const region = "us-central"
func newService(name string, uid types.UID, external bool) *api.Service {
return &api.Service{ObjectMeta: api.ObjectMeta{Name: name, Namespace: "namespace", UID: uid}, Spec: api.ServiceSpec{CreateExternalLoadBalancer: external}}
// newService builds a minimal Service fixture in the "namespace" namespace
// with the given name, UID, and service type, for use in controller tests.
func newService(name string, uid types.UID, serviceType api.ServiceType) *api.Service {
	svc := &api.Service{}
	svc.ObjectMeta = api.ObjectMeta{Name: name, Namespace: "namespace", UID: uid}
	svc.Spec = api.ServiceSpec{Type: serviceType}
	return svc
}
func TestCreateExternalLoadBalancer(t *testing.T) {
@@ -45,7 +45,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
Namespace: "default",
},
Spec: api.ServiceSpec{
CreateExternalLoadBalancer: false,
Type: api.ServiceTypeClusterIP,
},
},
expectErr: false,
@@ -62,7 +62,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
Port: 80,
Protocol: api.ProtocolUDP,
}},
CreateExternalLoadBalancer: true,
Type: api.ServiceTypeLoadBalancer,
},
},
expectErr: true,
@@ -79,7 +79,7 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
Port: 80,
Protocol: api.ProtocolTCP,
}},
CreateExternalLoadBalancer: true,
Type: api.ServiceTypeLoadBalancer,
},
},
expectErr: false,
@@ -144,15 +144,15 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
{
// Services do not have external load balancers: no calls should be made.
services: []*api.Service{
newService("s0", "111", false),
newService("s1", "222", false),
newService("s0", "111", api.ServiceTypeClusterIP),
newService("s1", "222", api.ServiceTypeNodePort),
},
expectedUpdateCalls: nil,
},
{
// Services does have an external load balancer: one call should be made.
services: []*api.Service{
newService("s0", "333", true),
newService("s0", "333", api.ServiceTypeLoadBalancer),
},
expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
{Name: "a333", Region: region, Hosts: []string{"node0", "node1", "node73"}},
@@ -161,9 +161,9 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
{
// Three services have an external load balancer: three calls.
services: []*api.Service{
newService("s0", "444", true),
newService("s1", "555", true),
newService("s2", "666", true),
newService("s0", "444", api.ServiceTypeLoadBalancer),
newService("s1", "555", api.ServiceTypeLoadBalancer),
newService("s2", "666", api.ServiceTypeLoadBalancer),
},
expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
{Name: "a444", Region: region, Hosts: []string{"node0", "node1", "node73"}},
@@ -174,10 +174,10 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
{
// Two services have an external load balancer and two don't: two calls.
services: []*api.Service{
newService("s0", "777", false),
newService("s1", "888", true),
newService("s3", "999", true),
newService("s4", "123", false),
newService("s0", "777", api.ServiceTypeNodePort),
newService("s1", "888", api.ServiceTypeLoadBalancer),
newService("s3", "999", api.ServiceTypeLoadBalancer),
newService("s4", "123", api.ServiceTypeClusterIP),
},
expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
{Name: "a888", Region: region, Hosts: []string{"node0", "node1", "node73"}},
@@ -187,7 +187,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
{
// One service has an external load balancer and one is nil: one call.
services: []*api.Service{
newService("s0", "234", true),
newService("s0", "234", api.ServiceTypeLoadBalancer),
nil,
},
expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{

View File

@@ -72,9 +72,14 @@ func RunClusterInfo(factory *cmdutil.Factory, out io.Writer, cmd *cobra.Command)
services := r.Object.(*api.ServiceList).Items
for _, service := range services {
var link string
if len(service.Spec.PublicIPs) > 0 {
if len(service.Status.LoadBalancer.Ingress) > 0 {
ingress := service.Status.LoadBalancer.Ingress[0]
ip := ingress.IP
if ip == "" {
ip = ingress.Hostname
}
for _, port := range service.Spec.Ports {
link += "http://" + service.Spec.PublicIPs[0] + ":" + strconv.Itoa(port.Port) + " "
link += "http://" + ip + ":" + strconv.Itoa(port.Port) + " "
}
} else {
link = client.Host + "/api/v1beta3/proxy/namespaces/" + service.ObjectMeta.Namespace + "/services/" + service.ObjectMeta.Name

View File

@@ -46,7 +46,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream`
func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "expose RESOURCE NAME --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--public-ip=ip] [--create-external-load-balancer=bool]",
Use: "expose RESOURCE NAME --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--public-ip=ip] [--type=type]",
Short: "Take a replicated application and expose it as Kubernetes Service",
Long: expose_long,
Example: expose_example,
@@ -60,7 +60,8 @@ func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd.Flags().String("protocol", "TCP", "The network protocol for the service to be created. Default is 'tcp'.")
cmd.Flags().Int("port", -1, "The port that the service should serve on. Required.")
cmd.MarkFlagRequired("port")
cmd.Flags().Bool("create-external-load-balancer", false, "If true, create an external load balancer for this service. Implementation is cloud provider dependent. Default is 'false'.")
cmd.Flags().String("type", "", "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP' unless --create-external-load-balancer is specified.")
cmd.Flags().Bool("create-external-load-balancer", false, "If true, create an external load balancer for this service (trumped by --type). Implementation is cloud provider dependent. Default is 'false'.")
cmd.Flags().String("selector", "", "A label selector to use for this service. If empty (the default) infer the selector from the replication controller.")
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.")
cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without creating it.")
@@ -161,6 +162,9 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str
}
params["labels"] = kubectl.MakeLabels(labels)
}
if v := cmdutil.GetFlagString(cmd, "type"); v != "" {
params["type"] = v
}
err = kubectl.ValidateParams(names, params)
if err != nil {
return err

View File

@@ -68,6 +68,7 @@ func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList)
ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"},
Spec: api.ServiceSpec{
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
},
},
},

View File

@@ -141,6 +141,7 @@ func TestMerge(t *testing.T) {
expected: &api.Service{
Spec: api.ServiceSpec{
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
},
},
},
@@ -157,6 +158,7 @@ func TestMerge(t *testing.T) {
expected: &api.Service{
Spec: api.ServiceSpec{
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
Selector: map[string]string{
"version": "v2",
},

View File

@@ -17,6 +17,7 @@ limitations under the License.
package kubectl
import (
"bytes"
"fmt"
"io"
"reflect"
@@ -480,6 +481,22 @@ func (d *ServiceDescriber) Describe(namespace, name string) (string, error) {
return describeService(service, endpoints, events)
}
// buildIngressString renders a load-balancer ingress list as a
// comma-separated string, preferring each entry's IP and falling back to
// its Hostname when the IP is empty.
func buildIngressString(ingress []api.LoadBalancerIngress) string {
	var b bytes.Buffer
	for idx, ing := range ingress {
		if idx > 0 {
			b.WriteString(", ")
		}
		if ing.IP == "" {
			b.WriteString(ing.Hostname)
		} else {
			b.WriteString(ing.IP)
		}
	}
	return b.String()
}
func describeService(service *api.Service, endpoints *api.Endpoints, events *api.EventList) (string, error) {
if endpoints == nil {
endpoints = &api.Endpoints{}
@@ -488,10 +505,11 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api
fmt.Fprintf(out, "Name:\t%s\n", service.Name)
fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(service.Labels))
fmt.Fprintf(out, "Selector:\t%s\n", formatLabels(service.Spec.Selector))
fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type)
fmt.Fprintf(out, "IP:\t%s\n", service.Spec.PortalIP)
if len(service.Spec.PublicIPs) > 0 {
list := strings.Join(service.Spec.PublicIPs, ", ")
fmt.Fprintf(out, "Public IPs:\t%s\n", list)
if len(service.Status.LoadBalancer.Ingress) > 0 {
list := buildIngressString(service.Status.LoadBalancer.Ingress)
fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list)
}
for i := range service.Spec.Ports {
sp := &service.Spec.Ports[i]
@@ -501,6 +519,9 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api
name = "<unnamed>"
}
fmt.Fprintf(out, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol)
if sp.NodePort != 0 {
fmt.Fprintf(out, "NodePort:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol)
}
fmt.Fprintf(out, "Endpoints:\t%s\t%s\n", name, formatEndpoints(endpoints, util.NewStringSet(sp.Name)))
}
fmt.Fprintf(out, "Session Affinity:\t%s\n", service.Spec.SessionAffinity)

View File

@@ -552,8 +552,12 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool) error {
}
ips := []string{svc.Spec.PortalIP}
for _, publicIP := range svc.Spec.PublicIPs {
ips = append(ips, publicIP)
ingress := svc.Status.LoadBalancer.Ingress
for i := range ingress {
if ingress[i].IP != "" {
ips = append(ips, ingress[i].IP)
}
}
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s\n", name, formatLabels(svc.Labels),

View File

@@ -646,10 +646,6 @@ func TestPrintHumanReadableService(t *testing.T) {
{
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
PublicIPs: []string{
"2.3.4.5",
"3.4.5.6",
},
Ports: []api.ServicePort{
{
Port: 80,
@@ -657,6 +653,18 @@ func TestPrintHumanReadableService(t *testing.T) {
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
{
IP: "3.4.5.6",
},
},
},
},
},
{
Spec: api.ServiceSpec{
@@ -680,9 +688,6 @@ func TestPrintHumanReadableService(t *testing.T) {
{
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
PublicIPs: []string{
"2.3.4.5",
},
Ports: []api.ServicePort{
{
Port: 80,
@@ -698,15 +703,19 @@ func TestPrintHumanReadableService(t *testing.T) {
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
},
},
},
},
{
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
PublicIPs: []string{
"2.3.4.5",
"4.5.6.7",
"5.6.7.8",
},
Ports: []api.ServicePort{
{
Port: 80,
@@ -722,6 +731,22 @@ func TestPrintHumanReadableService(t *testing.T) {
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
{
IP: "3.4.5.6",
},
{
IP: "5.6.7.8",
Hostname: "host5678",
},
},
},
},
},
}
@@ -734,9 +759,10 @@ func TestPrintHumanReadableService(t *testing.T) {
t.Errorf("expected to contain portal ip %s, but doesn't: %s", ip, output)
}
for _, ip = range svc.Spec.PublicIPs {
for _, ingress := range svc.Status.LoadBalancer.Ingress {
ip = ingress.IP
if !strings.Contains(output, ip) {
t.Errorf("expected to contain public ip %s, but doesn't: %s", ip, output)
t.Errorf("expected to contain ingress ip %s, but doesn't: %s", ip, output)
}
}
@@ -748,8 +774,8 @@ func TestPrintHumanReadableService(t *testing.T) {
}
// Max of # ports and (# public ip + portal ip)
count := len(svc.Spec.Ports)
if len(svc.Spec.PublicIPs)+1 > count {
count = len(svc.Spec.PublicIPs) + 1
if len(svc.Status.LoadBalancer.Ingress)+1 > count {
count = len(svc.Status.LoadBalancer.Ingress) + 1
}
if count != strings.Count(output, "\n") {
t.Errorf("expected %d newlines, found %d", count, strings.Count(output, "\n"))
@@ -907,9 +933,6 @@ func TestPrintHumanReadableWithNamespace(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
Spec: api.ServiceSpec{
PortalIP: "1.2.3.4",
PublicIPs: []string{
"2.3.4.5",
},
Ports: []api.ServicePort{
{
Port: 80,
@@ -917,6 +940,15 @@ func TestPrintHumanReadableWithNamespace(t *testing.T) {
},
},
},
Status: api.ServiceStatus{
LoadBalancer: api.LoadBalancerStatus{
Ingress: []api.LoadBalancerIngress{
{
IP: "2.3.4.5",
},
},
},
},
},
printNamespace: true,
},

View File

@@ -35,6 +35,7 @@ func (ServiceGenerator) ParamNames() []GeneratorParam {
{"labels", false},
{"public-ip", false},
{"create-external-load-balancer", false},
{"type", false},
{"protocol", false},
{"container-port", false}, // alias of target-port
{"target-port", false},
@@ -102,10 +103,13 @@ func (ServiceGenerator) Generate(params map[string]string) (runtime.Object, erro
service.Spec.Ports[0].TargetPort = util.NewIntOrStringFromInt(port)
}
if params["create-external-load-balancer"] == "true" {
service.Spec.CreateExternalLoadBalancer = true
service.Spec.Type = api.ServiceTypeLoadBalancer
}
if len(params["public-ip"]) != 0 {
service.Spec.PublicIPs = []string{params["public-ip"]}
service.Spec.DeprecatedPublicIPs = []string{params["public-ip"]}
}
if len(params["type"]) != 0 {
service.Spec.Type = api.ServiceType(params["type"])
}
return &service, nil
}

View File

@@ -144,7 +144,7 @@ func TestGenerateService(t *testing.T) {
TargetPort: util.NewIntOrStringFromString("foobar"),
},
},
PublicIPs: []string{"1.2.3.4"},
DeprecatedPublicIPs: []string{"1.2.3.4"},
},
},
},
@@ -175,8 +175,69 @@ func TestGenerateService(t *testing.T) {
TargetPort: util.NewIntOrStringFromString("foobar"),
},
},
PublicIPs: []string{"1.2.3.4"},
CreateExternalLoadBalancer: true,
Type: api.ServiceTypeLoadBalancer,
DeprecatedPublicIPs: []string{"1.2.3.4"},
},
},
},
{
params: map[string]string{
"selector": "foo=bar,baz=blah",
"name": "test",
"port": "80",
"protocol": "UDP",
"container-port": "foobar",
"type": string(api.ServiceTypeNodePort),
},
expected: api.Service{
ObjectMeta: api.ObjectMeta{
Name: "test",
},
Spec: api.ServiceSpec{
Selector: map[string]string{
"foo": "bar",
"baz": "blah",
},
Ports: []api.ServicePort{
{
Name: "default",
Port: 80,
Protocol: "UDP",
TargetPort: util.NewIntOrStringFromString("foobar"),
},
},
Type: api.ServiceTypeNodePort,
},
},
},
{
params: map[string]string{
"selector": "foo=bar,baz=blah",
"name": "test",
"port": "80",
"protocol": "UDP",
"container-port": "foobar",
"create-external-load-balancer": "true", // ignored when type is present
"type": string(api.ServiceTypeNodePort),
},
expected: api.Service{
ObjectMeta: api.ObjectMeta{
Name: "test",
},
Spec: api.ServiceSpec{
Selector: map[string]string{
"foo": "bar",
"baz": "blah",
},
Ports: []api.ServicePort{
{
Name: "default",
Port: 80,
Protocol: "UDP",
TargetPort: util.NewIntOrStringFromString("foobar"),
},
},
Type: api.ServiceTypeNodePort,
},
},
},

View File

@@ -28,6 +28,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/namespace"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
servicecontroller "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator/controller"
portallocatorcontroller "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/portallocator/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
@@ -39,12 +40,16 @@ import (
type Controller struct {
NamespaceRegistry namespace.Registry
ServiceRegistry service.Registry
ServiceIPRegistry service.IPRegistry
ServiceIPRegistry service.RangeRegistry
EndpointRegistry endpoint.Registry
PortalNet *net.IPNet
// TODO: MasterCount is yucky
MasterCount int
ServiceNodePortRegistry service.RangeRegistry
ServiceNodePortInterval time.Duration
ServiceNodePorts util.PortRange
PortalIPInterval time.Duration
EndpointInterval time.Duration
@@ -68,12 +73,16 @@ func (c *Controller) Start() {
return
}
repair := servicecontroller.NewRepair(c.PortalIPInterval, c.ServiceRegistry, c.PortalNet, c.ServiceIPRegistry)
repairPortals := servicecontroller.NewRepair(c.PortalIPInterval, c.ServiceRegistry, c.PortalNet, c.ServiceIPRegistry)
repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePorts, c.ServiceNodePortRegistry)
// run all of the controllers once prior to returning from Start.
if err := repair.RunOnce(); err != nil {
if err := repairPortals.RunOnce(); err != nil {
glog.Errorf("Unable to perform initial IP allocation check: %v", err)
}
if err := repairNodePorts.RunOnce(); err != nil {
glog.Errorf("Unable to perform initial service nodePort check: %v", err)
}
if err := c.UpdateKubernetesService(); err != nil {
glog.Errorf("Unable to perform initial Kubernetes service initialization: %v", err)
}
@@ -81,7 +90,7 @@ func (c *Controller) Start() {
glog.Errorf("Unable to perform initial Kubernetes RO service initialization: %v", err)
}
c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repair.RunUntil)
c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repairPortals.RunUntil, repairNodePorts.RunUntil)
c.runner.Start()
}

View File

@@ -63,13 +63,15 @@ import (
resourcequotaetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/resourcequota/etcd"
secretetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
etcdallocator "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator/etcd"
ipallocator "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator"
etcdipallocator "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator/etcd"
serviceaccountetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/serviceaccount/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/ui"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/portallocator"
"github.com/emicklei/go-restful"
"github.com/emicklei/go-restful/swagger"
"github.com/golang/glog"
@@ -141,13 +143,17 @@ type Config struct {
// The name of the cluster.
ClusterName string
// The range of ports to be assigned to services with type=NodePort or greater
ServiceNodePorts util.PortRange
}
// Master contains state for a Kubernetes cluster master/api server.
type Master struct {
// "Inputs", Copied from Config
portalNet *net.IPNet
cacheTimeout time.Duration
portalNet *net.IPNet
serviceNodePorts util.PortRange
cacheTimeout time.Duration
mux apiserver.Mux
muxHelper *apiserver.MuxHelper
@@ -188,11 +194,12 @@ type Master struct {
// registries are internal client APIs for accessing the storage layer
// TODO: define the internal typed interface in a way that clients can
// also be replaced
nodeRegistry minion.Registry
namespaceRegistry namespace.Registry
serviceRegistry service.Registry
endpointRegistry endpoint.Registry
portalAllocator service.IPRegistry
nodeRegistry minion.Registry
namespaceRegistry namespace.Registry
serviceRegistry service.Registry
endpointRegistry endpoint.Registry
portalAllocator service.RangeRegistry
serviceNodePortAllocator service.RangeRegistry
// "Outputs"
Handler http.Handler
@@ -226,6 +233,15 @@ func setDefaults(c *Config) {
}
c.PortalNet = portalNet
}
if c.ServiceNodePorts.Size == 0 {
// TODO: Currently no way to specify an empty range (do we need to allow this?)
// We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE)
// but then that breaks the strict nestedness of ServiceType.
// Review post-v1
		defaultServiceNodePorts := util.PortRange{Base: 30000, Size: 2768}
c.ServiceNodePorts = defaultServiceNodePorts
glog.Infof("Node port range unspecified. Defaulting to %v.", c.ServiceNodePorts)
}
if c.MasterCount == 0 {
// Clearly, there will be at least one master.
c.MasterCount = 1
@@ -260,6 +276,7 @@ func setDefaults(c *Config) {
// Certain config fields will be set to a default value if unset,
// including:
// PortalNet
// ServiceNodePorts
// MasterCount
// ReadOnlyPort
// ReadWritePort
@@ -299,6 +316,7 @@ func New(c *Config) *Master {
m := &Master{
portalNet: c.PortalNet,
serviceNodePorts: c.ServiceNodePorts,
rootWebService: new(restful.WebService),
enableCoreControllers: c.EnableCoreControllers,
enableLogsSupport: c.EnableLogsSupport,
@@ -424,9 +442,23 @@ func (m *Master) init(c *Config) {
registry := etcd.NewRegistry(c.EtcdHelper, podRegistry, m.endpointRegistry)
m.serviceRegistry = registry
ipAllocator := ipallocator.NewCIDRRange(m.portalNet)
portalAllocator := etcdipallocator.NewEtcd(ipAllocator, c.EtcdHelper)
m.portalAllocator = portalAllocator
var portalRangeRegistry service.RangeRegistry
portalAllocator := ipallocator.NewAllocatorCIDRRange(m.portalNet, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec)
etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.EtcdHelper)
portalRangeRegistry = etcd
return etcd
})
m.portalAllocator = portalRangeRegistry
var serviceNodePortRegistry service.RangeRegistry
serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePorts, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec)
etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.EtcdHelper)
serviceNodePortRegistry = etcd
return etcd
})
m.serviceNodePortAllocator = serviceNodePortRegistry
controllerStorage := controlleretcd.NewREST(c.EtcdHelper)
@@ -444,7 +476,7 @@ func (m *Master) init(c *Config) {
"podTemplates": podTemplateStorage,
"replicationControllers": controllerStorage,
"services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, portalAllocator, c.ClusterName),
"services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, portalAllocator, serviceNodePortAllocator, c.ClusterName),
"endpoints": endpointsStorage,
"minions": nodeStorage,
"minions/status": nodeStatusStorage,
@@ -589,8 +621,12 @@ func (m *Master) NewBootstrapController() *Controller {
PortalNet: m.portalNet,
MasterCount: m.masterCount,
PortalIPInterval: 3 * time.Minute,
EndpointInterval: 10 * time.Second,
ServiceNodePortRegistry: m.serviceNodePortAllocator,
ServiceNodePorts: m.serviceNodePorts,
ServiceNodePortInterval: 3 * time.Minute,
PortalIPInterval: 3 * time.Minute,
EndpointInterval: 10 * time.Second,
PublicIP: m.clusterIP,

View File

@@ -40,7 +40,8 @@ type serviceInfo struct {
proxyPort int
socket proxySocket
timeout time.Duration
publicIPs []string // TODO: make this net.IP
nodePort int
loadBalancerStatus api.LoadBalancerStatus
sessionAffinityType api.ServiceAffinity
stickyMaxAgeMinutes int
}
@@ -61,12 +62,24 @@ type Proxier struct {
loadBalancer LoadBalancer
mu sync.Mutex // protects serviceMap
serviceMap map[ServicePortName]*serviceInfo
portMapMutex sync.Mutex
portMap map[portMapKey]ServicePortName
numProxyLoops int32 // use atomic ops to access this; mostly for testing
listenIP net.IP
iptables iptables.Interface
hostIP net.IP
}
// A key for the portMap: identifies a locally claimed proxy port by
// port number and protocol.
type portMapKey struct {
	port int
	protocol api.Protocol
}
// String formats the key as "PROTOCOL/port" for logs and error messages.
func (k *portMapKey) String() string {
	return fmt.Sprintf("%s/%d", k.protocol, k.port)
}
var (
// ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on
// the loopback address. May be checked for by callers of NewProxier to know whether
@@ -113,6 +126,7 @@ func createProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptables
return &Proxier{
loadBalancer: loadBalancer,
serviceMap: make(map[ServicePortName]*serviceInfo),
portMap: make(map[portMapKey]ServicePortName),
listenIP: listenIP,
iptables: iptables,
hostIP: hostIP,
@@ -273,7 +287,9 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
}
info.portalIP = serviceIP
info.portalPort = servicePort.Port
info.publicIPs = service.Spec.PublicIPs
// Deep-copy in case the service instance changes
info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
info.nodePort = servicePort.NodePort
info.sessionAffinityType = service.Spec.SessionAffinity
glog.V(4).Infof("info: %+v", info)
@@ -302,13 +318,13 @@ func (proxier *Proxier) OnUpdate(services []api.Service) {
}
func sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool {
if info.protocol != port.Protocol || info.portalPort != port.Port {
if info.protocol != port.Protocol || info.portalPort != port.Port || info.nodePort != port.NodePort {
return false
}
if !info.portalIP.Equal(net.ParseIP(service.Spec.PortalIP)) {
return false
}
if !ipsEqual(info.publicIPs, service.Spec.PublicIPs) {
if !api.LoadBalancerStatusEqual(&info.loadBalancerStatus, &service.Status.LoadBalancer) {
return false
}
if info.sessionAffinityType != service.Spec.SessionAffinity {
@@ -334,8 +350,16 @@ func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) e
if err != nil {
return err
}
for _, publicIP := range info.publicIPs {
err = proxier.openOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)
for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" {
err = proxier.openOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil {
return err
}
}
}
if info.nodePort != 0 {
err = proxier.openNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)
if err != nil {
return err
}
@@ -346,7 +370,7 @@ func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) e
func (proxier *Proxier) openOnePortal(portalIP net.IP, portalPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error {
// Handle traffic from containers.
args := proxier.iptablesContainerPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name)
existed, err := proxier.iptables.EnsureRule(iptables.TableNAT, iptablesContainerPortalChain, args...)
existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerPortalChain, name)
return err
@@ -357,7 +381,7 @@ func (proxier *Proxier) openOnePortal(portalIP net.IP, portalPort int, protocol
// Handle traffic from the host.
args = proxier.iptablesHostPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name)
existed, err = proxier.iptables.EnsureRule(iptables.TableNAT, iptablesHostPortalChain, args...)
existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...)
if err != nil {
glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name)
return err
@@ -368,11 +392,90 @@ func (proxier *Proxier) openOnePortal(portalIP net.IP, portalPort int, protocol
return nil
}
// claimPort marks a port as being owned by a particular service, or returns
// an error if the port is already claimed by a different service.
// Idempotent: reclaiming with the same owner is not an error.
func (proxier *Proxier) claimPort(port int, protocol api.Protocol, owner ServicePortName) error {
	proxier.portMapMutex.Lock()
	defer proxier.portMapMutex.Unlock()

	// TODO: We could pre-populate some reserved ports into portMap and/or blacklist some well-known ports

	key := portMapKey{port: port, protocol: protocol}
	switch existing, found := proxier.portMap[key]; {
	case !found:
		proxier.portMap[key] = owner
		return nil
	case existing == owner:
		// We are idempotent
		return nil
	default:
		return fmt.Errorf("Port conflict detected on port %v. %v vs %v", key, owner, existing)
	}
}
// releasePort drops a claim on a port.  Returns an error if the owner does
// not match the claim.  Releasing an unclaimed port is tolerated (it happens
// while cleaning up after a failed allocation) rather than treated as an error.
func (proxier *Proxier) releasePort(port int, protocol api.Protocol, owner ServicePortName) error {
	proxier.portMapMutex.Lock()
	defer proxier.portMapMutex.Unlock()

	key := portMapKey{port: port, protocol: protocol}
	existing, found := proxier.portMap[key]
	switch {
	case !found:
		// Cleanup of a failed allocation may release a port that was never claimed.
		glog.Infof("Ignoring release on unowned port: %v", key)
		return nil
	case existing != owner:
		return fmt.Errorf("Port conflict detected on port %v (unowned unlock). %v vs %v", key, owner, existing)
	default:
		delete(proxier.portMap, key)
		return nil
	}
}
// openNodePort installs iptables rules that redirect traffic arriving on
// nodePort — both from containers and from the host itself — to the local
// proxy port, after first claiming the port so two services cannot collide.
// NOTE(review): if a later EnsureRule call fails we return with the port
// still claimed; presumably closePortal/closeNodePort cleans this up — verify.
func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error {
	// TODO: Do we want to allow containers to access public services? Probably yes.
	// TODO: We could refactor this to be the same code as portal, but with IP == nil
	err := proxier.claimPort(nodePort, protocol, name)
	if err != nil {
		return err
	}
	// Handle traffic from containers.
	args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
	existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...)
	if err != nil {
		glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name)
		return err
	}
	if !existed {
		glog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort)
	}
	// Handle traffic from the host.
	args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
	existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...)
	if err != nil {
		glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name)
		return err
	}
	if !existed {
		glog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort)
	}
	return nil
}
func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo) error {
// Collect errors and report them all at the end.
el := proxier.closeOnePortal(info.portalIP, info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)
for _, publicIP := range info.publicIPs {
el = append(el, proxier.closeOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
for _, ingress := range info.loadBalancerStatus.Ingress {
if ingress.IP != "" {
el = append(el, proxier.closeOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
}
if info.nodePort != 0 {
el = append(el, proxier.closeNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)...)
}
if len(el) == 0 {
glog.V(3).Infof("Closed iptables portals for service %q", service)
@@ -402,28 +505,94 @@ func (proxier *Proxier) closeOnePortal(portalIP net.IP, portalPort int, protocol
return el
}
// closeNodePort removes the iptables rules for a NodePort service and then
// releases the local port claim.  Errors are collected (not short-circuited)
// so that every teardown step is attempted.
func (proxier *Proxier) closeNodePort(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) []error {
	el := []error{}
	// Handle traffic from containers.
	args := proxier.iptablesContainerNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
	if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerNodePortChain, args...); err != nil {
		glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name)
		el = append(el, err)
	}
	// Handle traffic from the host.
	args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name)
	if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostNodePortChain, args...); err != nil {
		glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name)
		el = append(el, err)
	}
	// Release the claim last so a failed rule deletion still frees the port.
	if err := proxier.releasePort(nodePort, protocol, name); err != nil {
		el = append(el, err)
	}
	return el
}
// See comments in the *PortalArgs() functions for some details about why we
// use two chains.
// use two chains for portals.
var iptablesContainerPortalChain iptables.Chain = "KUBE-PORTALS-CONTAINER"
var iptablesHostPortalChain iptables.Chain = "KUBE-PORTALS-HOST"
// Chains for NodePort services
var iptablesContainerNodePortChain iptables.Chain = "KUBE-NODEPORT-CONTAINER"
var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST"
// Ensure that the iptables infrastructure we use is set up. This can safely be called periodically.
func iptablesInit(ipt iptables.Interface) error {
// TODO: There is almost certainly room for optimization here. E.g. If
// we knew the portal_net CIDR we could fast-track outbound packets not
// destined for a service. There's probably more, help wanted.
// Danger - order of these rules matters here:
//
// We match portal rules first, then NodePort rules. For NodePort rules, we filter primarily on --dst-type LOCAL,
// because we want to listen on all local addresses, but don't match internet traffic with the same dst port number.
//
// There is one complication (per thockin):
// -m addrtype --dst-type LOCAL is what we want except that it is broken (by intent without foresight to our usecase)
// on at least GCE. Specifically, GCE machines have a daemon which learns what external IPs are forwarded to that
// machine, and configure a local route for that IP, making a match for --dst-type LOCAL when we don't want it to.
// Removing the route gives correct behavior until the daemon recreates it.
// Killing the daemon is an option, but means that any non-kubernetes use of the machine with external IP will be broken.
//
// This applies to IPs on GCE that are actually from a load-balancer; they will be categorized as LOCAL.
// _If_ the chains were in the wrong order, and the LB traffic had dst-port == a NodePort on some other service,
// the NodePort would take priority (incorrectly).
// This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems
// doubly-unlikely), but we need to be careful to keep the rules in the right order.
args := []string{ /* portal_net matching could go here */ }
args = append(args, "-m", "comment", "--comment", "handle Portals; NOTE: this must be before the NodePort rules")
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil {
return err
}
if _, err := ipt.EnsureRule(iptables.TableNAT, iptables.ChainPrerouting, "-j", string(iptablesContainerPortalChain)); err != nil {
if _, err := ipt.EnsureRule(iptables.Prepend, iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerPortalChain))...); err != nil {
return err
}
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesHostPortalChain); err != nil {
return err
}
if _, err := ipt.EnsureRule(iptables.TableNAT, iptables.ChainOutput, "-j", string(iptablesHostPortalChain)); err != nil {
if _, err := ipt.EnsureRule(iptables.Prepend, iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostPortalChain))...); err != nil {
return err
}
// This set of rules matches broadly (addrtype & destination port), and therefore must come after the portal rules
args = []string{"-m", "addrtype", "--dst-type", "LOCAL"}
args = append(args, "-m", "comment", "--comment", "handle service NodePorts; NOTE: this must be the last rule in the chain")
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerNodePortChain); err != nil {
return err
}
if _, err := ipt.EnsureRule(iptables.Append, iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerNodePortChain))...); err != nil {
return err
}
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesHostNodePortChain); err != nil {
return err
}
if _, err := ipt.EnsureRule(iptables.Append, iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostNodePortChain))...); err != nil {
return err
}
// TODO: Verify order of rules.
return nil
}
@@ -436,6 +605,12 @@ func iptablesFlush(ipt iptables.Interface) error {
if err := ipt.FlushChain(iptables.TableNAT, iptablesHostPortalChain); err != nil {
el = append(el, err)
}
if err := ipt.FlushChain(iptables.TableNAT, iptablesContainerNodePortChain); err != nil {
el = append(el, err)
}
if err := ipt.FlushChain(iptables.TableNAT, iptablesHostNodePortChain); err != nil {
el = append(el, err)
}
if len(el) != 0 {
glog.Errorf("Some errors flushing old iptables portals: %v", el)
}
@@ -464,9 +639,13 @@ func iptablesCommonPortalArgs(destIP net.IP, destPort int, protocol api.Protocol
"--comment", service.String(),
"-p", strings.ToLower(string(protocol)),
"-m", strings.ToLower(string(protocol)),
"-d", fmt.Sprintf("%s/32", destIP.String()),
"--dport", fmt.Sprintf("%d", destPort),
}
if destIP != nil {
args = append(args, "-d", fmt.Sprintf("%s/32", destIP.String()))
}
return args
}
@@ -550,6 +729,37 @@ func (proxier *Proxier) iptablesHostPortalArgs(destIP net.IP, destPort int, prot
return args
}
// Build a slice of iptables args for a from-container public-port rule.
// See iptablesContainerPortalArgs
// TODO: Should we just reuse iptablesContainerPortalArgs?
func (proxier *Proxier) iptablesContainerNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, nodePort, protocol, service)
if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
// TODO: Can we REDIRECT with IPv6?
args = append(args, "-j", "REDIRECT", "--to-ports", fmt.Sprintf("%d", proxyPort))
} else {
// TODO: Can we DNAT with IPv6?
args = append(args, "-j", "DNAT", "--to-destination", net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort)))
}
return args
}
// Build a slice of iptables args for a from-host public-port rule.
// See iptablesHostPortalArgs
// TODO: Should we just reuse iptablesHostPortalArgs?
func (proxier *Proxier) iptablesHostNodePortArgs(nodePort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, service ServicePortName) []string {
args := iptablesCommonPortalArgs(nil, nodePort, protocol, service)
if proxyIP.Equal(zeroIPv4) || proxyIP.Equal(zeroIPv6) {
proxyIP = proxier.hostIP
}
// TODO: Can we DNAT with IPv6?
args = append(args, "-j", "DNAT", "--to-destination", net.JoinHostPort(proxyIP.String(), strconv.Itoa(proxyPort)))
return args
}
func isTooManyFDsError(err error) bool {
return strings.Contains(err.Error(), "too many open files")
}

View File

@@ -92,7 +92,7 @@ func (fake *fakeIptables) FlushChain(table iptables.Table, chain iptables.Chain)
return nil
}
func (fake *fakeIptables) EnsureRule(table iptables.Table, chain iptables.Chain, args ...string) (bool, error) {
func (fake *fakeIptables) EnsureRule(position iptables.RulePosition, table iptables.Table, chain iptables.Chain, args ...string) (bool, error) {
return false, nil
}
@@ -723,8 +723,8 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
Port: svcInfo.portalPort,
Protocol: "TCP",
}},
PortalIP: svcInfo.portalIP.String(),
PublicIPs: []string{"4.3.2.1"},
PortalIP: svcInfo.portalIP.String(),
DeprecatedPublicIPs: []string{"4.3.2.1"},
},
}})
// Wait for the socket to actually get free.
@@ -810,3 +810,5 @@ func TestProxyUpdatePortal(t *testing.T) {
}
// TODO: Test UDP timeouts.
// TODO(justinsb): Add test for nodePort conflict detection, once we have nodePort wired in

View File

@@ -601,6 +601,7 @@ func TestEtcdUpdateService(t *testing.T) {
"baz": "bar",
},
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
},
}
_, err := registry.UpdateService(ctx, &testService)

View File

@@ -0,0 +1,168 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocator
import (
"errors"
"math/big"
"math/rand"
"sync"
)
// AllocationBitmap is a contiguous block of resources that can be allocated atomically.
//
// Each resource has an offset. The internal structure is a bitmap, with a bit for each offset.
//
// If a resource is taken, the bit at that offset is set to one.
// r.count is always equal to the number of set bits and can be recalculated at any time
// by counting the set bits in r.allocated.
//
// TODO: use RLE and compact the allocator to minimize space.
type AllocationBitmap struct {
	// strategy is the strategy for choosing the next available item out of the range
	strategy allocateStrategy
	// max is the maximum size of the usable items in the range
	max int
	// rangeSpec is the range specifier, matching RangeAllocation.Range
	rangeSpec string
	// lock guards the following members
	lock sync.Mutex
	// count is the number of currently allocated elements in the range
	count int
	// allocated is a bit array of the allocated items in the range
	allocated *big.Int
}
// AllocationBitmap implements Interface and Snapshottable
var _ Interface = &AllocationBitmap{}
var _ Snapshottable = &AllocationBitmap{}
// allocateStrategy is a search strategy in the allocation map for a valid item.
// It returns the chosen offset and true, or (0, false) if no item is available.
type allocateStrategy func(allocated *big.Int, max, count int) (int, bool)
// NewAllocationMap creates an AllocationBitmap covering max items, tracked
// under the given range specifier, using a random-scan allocation strategy
// and an initially empty bitmap.
func NewAllocationMap(max int, rangeSpec string) *AllocationBitmap {
	return &AllocationBitmap{
		strategy:  randomScanStrategy,
		allocated: big.NewInt(0),
		count:     0,
		max:       max,
		rangeSpec: rangeSpec,
	}
}
// Allocate attempts to reserve the provided item.
// Returns true if it was allocated, false if it was already in use or the
// offset lies outside the range [0, max).
func (r *AllocationBitmap) Allocate(offset int) (bool, error) {
	r.lock.Lock()
	defer r.lock.Unlock()
	// Reject out-of-range offsets: a negative offset would panic inside
	// big.Int.Bit, and an offset >= max would set a bit outside the range,
	// corrupting count and the value reported by Free.
	if offset < 0 || offset >= r.max {
		return false, nil
	}
	if r.allocated.Bit(offset) == 1 {
		return false, nil
	}
	r.allocated = r.allocated.SetBit(r.allocated, offset, 1)
	r.count++
	return true, nil
}
// AllocateNext reserves one of the items from the pool.
// (0, false, nil) may be returned if there are no items left.
func (r *AllocationBitmap) AllocateNext() (int, bool, error) {
	r.lock.Lock()
	defer r.lock.Unlock()
	// Delegate the choice of offset to the configured search strategy.
	offset, found := r.strategy(r.allocated, r.max, r.count)
	if !found {
		return 0, false, nil
	}
	r.allocated = r.allocated.SetBit(r.allocated, offset, 1)
	r.count++
	return offset, true, nil
}
// Release releases the item back to the pool. Releasing an
// unallocated item or an item out of the range is a no-op and
// returns no error.
func (r *AllocationBitmap) Release(offset int) error {
	r.lock.Lock()
	defer r.lock.Unlock()
	// Enforce the documented "out of range is a no-op" contract:
	// big.Int.Bit panics on a negative index, so guard before touching
	// the bitmap.
	if offset < 0 || offset >= r.max {
		return nil
	}
	if r.allocated.Bit(offset) == 0 {
		return nil
	}
	r.allocated = r.allocated.SetBit(r.allocated, offset, 0)
	r.count--
	return nil
}
// Has returns true if the provided item is already allocated and a call
// to Allocate(offset) would fail.
func (r *AllocationBitmap) Has(offset int) bool {
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.allocated.Bit(offset) != 0
}
// Free returns the count of items left in the range.
func (r *AllocationBitmap) Free() int {
	r.lock.Lock()
	defer r.lock.Unlock()
	remaining := r.max - r.count
	return remaining
}
// Snapshot saves the current state of the pool: the range specifier plus the
// raw bytes of the allocation bitmap, suitable for a later Restore.
func (r *AllocationBitmap) Snapshot() (rangeSpec string, data []byte) {
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.rangeSpec, r.allocated.Bytes()
}
// Restore restores the pool to the previously captured state.  The provided
// rangeSpec must match the one this bitmap was created with.
func (r *AllocationBitmap) Restore(rangeSpec string, data []byte) error {
	r.lock.Lock()
	defer r.lock.Unlock()
	if rangeSpec != r.rangeSpec {
		return errors.New("the provided range does not match the current range")
	}
	snapshot := big.NewInt(0).SetBytes(data)
	r.allocated = snapshot
	// count mirrors the number of set bits; recompute it from the snapshot.
	r.count = countBits(snapshot)
	return nil
}
// randomScanStrategy chooses a random address from the provided big.Int, and then
// scans forward looking for the next available address (it will wrap the range if
// necessary).
func randomScanStrategy(allocated *big.Int, max, count int) (int, bool) {
if count >= max {
return 0, false
}
offset := rand.Intn(max)
for i := 0; i < max; i++ {
at := (offset + i) % max
if allocated.Bit(at) == 0 {
return at, true
}
}
return 0, false
}

View File

@@ -0,0 +1,239 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"errors"
"fmt"
"sync"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
k8serr "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
etcderr "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
)
var (
	// errorUnableToAllocate is a sentinel used inside tryUpdate closures to
	// signal that the item could not be allocated after refreshing state
	// from etcd; callers translate it back into an ordinary result.
	errorUnableToAllocate = errors.New("unable to allocate")
)
// Etcd exposes a service.Allocator that is backed by etcd.
// TODO: allow multiple allocations to be tried at once
// TODO: subdivide the keyspace to reduce conflicts
// TODO: investigate issuing a CAS without reading first
type Etcd struct {
	// lock serializes all allocator operations against the local snapshot.
	lock sync.Mutex
	// alloc is the in-memory allocation state mirrored to etcd.
	alloc allocator.Snapshottable
	// helper performs the etcd reads and guaranteed updates.
	helper tools.EtcdHelper
	// last is the etcd ResourceVersion most recently observed by tryUpdate.
	last string
	// baseKey is the etcd key under which the RangeAllocation is stored.
	baseKey string
	// kind names the resource type for error reporting.
	kind string
}
// Etcd implements allocator.Interface and service.RangeRegistry
var _ allocator.Interface = &Etcd{}
var _ service.RangeRegistry = &Etcd{}
// NewEtcd returns an allocator that is backed by Etcd and can manage
// persisting the snapshot state of allocation after each allocation is made.
func NewEtcd(alloc allocator.Snapshottable, baseKey string, kind string, helper tools.EtcdHelper) *Etcd {
	e := &Etcd{
		alloc:   alloc,
		helper:  helper,
		baseKey: baseKey,
		kind:    kind,
	}
	return e
}
// Allocate attempts to allocate the item locally and then in etcd.
// It returns (false, nil) when the item is already in use, either in the
// local snapshot or — after a refresh — in etcd.
func (e *Etcd) Allocate(offset int) (bool, error) {
	e.lock.Lock()
	defer e.lock.Unlock()

	// First claim the item in the in-memory snapshot; bail out early if it
	// is already taken or the allocator reports an error.
	if ok, err := e.alloc.Allocate(offset); !ok || err != nil {
		return ok, err
	}

	// Then persist the claim, replaying the allocation against whatever
	// state turns out to be current in etcd.
	err := e.tryUpdate(func() error {
		ok, err := e.alloc.Allocate(offset)
		if err != nil {
			return err
		}
		if !ok {
			return errorUnableToAllocate
		}
		return nil
	})
	switch err {
	case nil:
		return true, nil
	case errorUnableToAllocate:
		return false, nil
	default:
		return false, err
	}
}
// AllocateNext attempts to allocate the next item locally and then in etcd.
//
// On success it returns the allocated offset and true.  If etcd holds a newer
// state in which the locally chosen offset is already taken, a fresh offset is
// chosen from the refreshed local state before persisting.
//
// NOTE(review): unlike Allocate, a tryUpdate failure with errorUnableToAllocate
// is returned to the caller as (offset, true, err) rather than being mapped to
// (0, false, nil) — callers must check err before trusting ok.  Verify this
// asymmetry is intended.
func (e *Etcd) AllocateNext() (int, bool, error) {
	e.lock.Lock()
	defer e.lock.Unlock()
	offset, ok, err := e.alloc.AllocateNext()
	if !ok || err != nil {
		return offset, ok, err
	}
	err = e.tryUpdate(func() error {
		ok, err := e.alloc.Allocate(offset)
		if err != nil {
			return err
		}
		if !ok {
			// The offset chosen before the etcd refresh is now taken; pick
			// another from the refreshed state.  This assignment updates the
			// captured outer offset, while ok and err here refer to the
			// closure-local (shadowed) variables.
			offset, ok, err = e.alloc.AllocateNext()
			if err != nil {
				return err
			}
			if !ok {
				return errorUnableToAllocate
			}
			return nil
		}
		return nil
	})
	return offset, ok, err
}
// Release attempts to release the provided item locally and then in etcd.
func (e *Etcd) Release(item int) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	err := e.alloc.Release(item)
	if err != nil {
		return err
	}
	// Persist the release by replaying it against the latest etcd snapshot.
	release := func() error { return e.alloc.Release(item) }
	return e.tryUpdate(release)
}
// tryUpdate performs a read-update to persist the latest snapshot state of allocation.
//
// If etcd holds a newer ResourceVersion than the one last seen (e.last), the
// local allocator is first rebuilt from the etcd state and fn is replayed
// against it; fn should re-apply the caller's pending change and return
// errorUnableToAllocate (or another error) if it no longer can.  The updated
// snapshot is then written back through the helper's guaranteed update.
func (e *Etcd) tryUpdate(fn func() error) error {
	err := e.helper.GuaranteedUpdate(e.baseKey, &api.RangeAllocation{}, true,
		func(input runtime.Object) (output runtime.Object, ttl uint64, err error) {
			existing := input.(*api.RangeAllocation)
			// An empty ResourceVersion means the range has never been
			// initialized in etcd; refuse to allocate until it is.
			if len(existing.ResourceVersion) == 0 {
				return nil, 0, fmt.Errorf("cannot allocate resources of type %s at this time", e.kind)
			}
			if existing.ResourceVersion != e.last {
				// Someone else changed the range since we last looked:
				// rebuild local state from etcd and replay the caller's change.
				if err := e.alloc.Restore(existing.Range, existing.Data); err != nil {
					return nil, 0, err
				}
				if err := fn(); err != nil {
					return nil, 0, err
				}
			}
			e.last = existing.ResourceVersion
			rangeSpec, data := e.alloc.Snapshot()
			existing.Range = rangeSpec
			existing.Data = data
			return existing, 0, nil
		},
	)
	return etcderr.InterpretUpdateError(err, e.kind, "")
}
// Refresh reloads the RangeAllocation from etcd.  A missing key is reported
// as (nil, nil) rather than an error.
func (e *Etcd) Refresh() (*api.RangeAllocation, error) {
	e.lock.Lock()
	defer e.lock.Unlock()

	latest := &api.RangeAllocation{}
	err := e.helper.ExtractObj(e.baseKey, latest, false)
	switch {
	case err == nil:
		return latest, nil
	case tools.IsEtcdNotFound(err):
		return nil, nil
	default:
		return nil, etcderr.InterpretGetError(err, e.kind, "")
	}
}
// Get returns an api.RangeAllocation that represents the current state in
// etcd. If the key does not exist, the object will have an empty ResourceVersion.
func (e *Etcd) Get() (*api.RangeAllocation, error) {
	current := &api.RangeAllocation{}
	err := e.helper.ExtractObj(e.baseKey, current, true)
	if err != nil {
		return nil, etcderr.InterpretGetError(err, e.kind, "")
	}
	return current, nil
}
// CreateOrUpdate attempts to update the current etcd state with the provided
// allocation.
//
// If the snapshot carries a ResourceVersion it must match the version
// currently stored; if etcd is already initialized and the snapshot carries
// none, the call is rejected with a conflict.  On success the local allocator
// is restored from the snapshot.
//
// NOTE(review): e.last records the snapshot's incoming ResourceVersion, not
// the post-write version assigned by etcd — verify tryUpdate's refresh path
// tolerates this.
func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error {
	e.lock.Lock()
	defer e.lock.Unlock()
	last := ""
	err := e.helper.GuaranteedUpdate(e.baseKey, &api.RangeAllocation{}, true,
		func(input runtime.Object) (output runtime.Object, ttl uint64, err error) {
			existing := input.(*api.RangeAllocation)
			switch {
			case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0:
				// Both sides versioned: standard optimistic-concurrency check.
				if snapshot.ResourceVersion != existing.ResourceVersion {
					return nil, 0, k8serr.NewConflict(e.kind, "", fmt.Errorf("the provided resource version does not match"))
				}
			case len(existing.ResourceVersion) != 0:
				// etcd is already initialized but the caller supplied no version.
				return nil, 0, k8serr.NewConflict(e.kind, "", fmt.Errorf("another caller has already initialized the resource"))
			}
			last = snapshot.ResourceVersion
			return snapshot, 0, nil
		},
	)
	if err != nil {
		return etcderr.InterpretUpdateError(err, e.kind, "")
	}
	// Only adopt the snapshot locally after etcd accepted it.
	err = e.alloc.Restore(snapshot.Range, snapshot.Data)
	if err == nil {
		e.last = last
	}
	return err
}
// Has reports whether the item is currently allocated locally.
// Implements allocator.Interface::Has
func (e *Etcd) Has(item int) bool {
	e.lock.Lock()
	defer e.lock.Unlock()

	held := e.alloc.Has(item)
	return held
}
// Free returns the number of items still available in the local allocator.
// Implements allocator.Interface::Free
func (e *Etcd) Free() int {
	e.lock.Lock()
	defer e.lock.Unlock()

	remaining := e.alloc.Free()
	return remaining
}

View File

@@ -0,0 +1,125 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"testing"
"github.com/coreos/go-etcd/etcd"
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest"
)
// newHelper returns a fake etcd client (with TestIndex enabled) and an
// EtcdHelper bound to it, using the test codec and path prefix.
func newHelper(t *testing.T) (*tools.FakeEtcdClient, tools.EtcdHelper) {
	client := tools.NewFakeEtcdClient(t)
	client.TestIndex = true
	return client, tools.NewEtcdHelper(client, testapi.Codec(), etcdtest.PathPrefix())
}
// newStorage builds an etcd-backed allocator over a 100-item in-memory
// allocation map, returning the storage, its backing map, and the fake etcd
// client behind it.
func newStorage(t *testing.T) (*Etcd, allocator.Interface, *tools.FakeEtcdClient) {
	fakeEtcdClient, h := newHelper(t)
	mem := allocator.NewAllocationMap(100, "rangeSpecValue")
	// Named "storage" rather than "etcd" to avoid shadowing the imported
	// go-etcd "etcd" package used elsewhere in this file.
	storage := NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", h)
	return storage, mem, fakeEtcdClient
}
// key returns the etcd key for the service-IP range with the test path
// prefix applied.
func key() string {
	return etcdtest.AddPrefix("/ranges/serviceips")
}
// TestEmpty verifies that allocation is refused while the range has never
// been initialized in etcd (the key is absent).
func TestEmpty(t *testing.T) {
	storage, _, ecli := newStorage(t)
	ecli.ExpectNotFoundGet(key())
	_, err := storage.Allocate(1)
	if fmt.Sprintf("%v", err) != "cannot allocate resources of type serviceipallocation at this time" {
		t.Fatal(err)
	}
}
// initialObject seeds the fake etcd client with an encoded RangeAllocation
// for "rangeSpecValue" at ModifiedIndex 2, simulating a range that has
// already been initialized (and therefore carries a ResourceVersion).
func initialObject(ecli *tools.FakeEtcdClient) {
	ecli.Data[key()] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				CreatedIndex:  1,
				ModifiedIndex: 2,
				Value: runtime.EncodeOrDie(testapi.Codec(), &api.RangeAllocation{
					Range: "rangeSpecValue",
				}),
			},
		},
		E: nil,
	}
}
// TestStore exercises write-through behavior of the etcd-backed allocator:
// an allocation made through storage must be reflected in the backing map,
// persisted to etcd, restorable by a fresh in-memory allocator, and visible
// to an independent etcd-backed allocator over the same key.
func TestStore(t *testing.T) {
	storage, backing, ecli := newStorage(t)
	initialObject(ecli)

	// Allocate item 2 through the etcd-backed storage.
	if _, err := storage.Allocate(2); err != nil {
		t.Fatal(err)
	}
	// The backing map must already hold item 2 (ok == false)...
	ok, err := backing.Allocate(2)
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("Expected backing allocation to fail")
	}
	// ...and a second allocation through storage must also report failure.
	if ok, err := storage.Allocate(2); ok || err != nil {
		t.Fatal("Expected allocation to fail")
	}

	// The allocation must have been written to etcd under key().
	obj := ecli.Data[key()]
	if obj.R == nil || obj.R.Node == nil {
		t.Fatalf("%s is empty: %#v", key(), obj)
	}
	t.Logf("data: %#v", obj.R.Node)

	// Extract the persisted RangeAllocation and restore it into a fresh
	// in-memory allocator; item 2 must be present after restore.
	other := allocator.NewAllocationMap(100, "rangeSpecValue")
	allocation := &api.RangeAllocation{}
	if err := storage.helper.ExtractObj(key(), allocation, false); err != nil {
		t.Fatal(err)
	}
	if allocation.ResourceVersion != "1" {
		t.Fatalf("%#v", allocation)
	}
	if allocation.Range != "rangeSpecValue" {
		t.Errorf("unexpected stored Range: %s", allocation.Range)
	}
	if err := other.Restore("rangeSpecValue", allocation.Data); err != nil {
		t.Fatal(err)
	}
	if !other.Has(2) {
		t.Fatalf("could not restore allocated IP: %#v", other)
	}

	// A second etcd-backed allocator over the same helper/key must observe
	// the existing allocation and refuse to allocate item 2 again.
	other = allocator.NewAllocationMap(100, "rangeSpecValue")
	otherStorage := NewEtcd(other, "/ranges/serviceips", "serviceipallocation", storage.helper)
	if ok, err := otherStorage.Allocate(2); ok || err != nil {
		t.Fatal(err)
	}
}

View File

@@ -0,0 +1,41 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocator
// Interface manages the allocation of items out of a range. Interface
// should be threadsafe.
type Interface interface {
	// Allocate marks the given offset as in use; the returned bool reports
	// whether the offset was free (false means it was already allocated).
	Allocate(int) (bool, error)
	// AllocateNext reserves any free offset; the bool reports whether one
	// was available.
	AllocateNext() (int, bool, error)
	// Release returns the offset to the pool.
	Release(int) error

	// For testing
	Has(int) bool

	// For testing
	Free() int
}
// Snapshottable is an Interface that can be snapshotted and restored. Snapshottable
// should be threadsafe.
type Snapshottable interface {
	Interface
	// Snapshot returns the range spec string and an opaque byte encoding
	// of the current allocation state.
	Snapshot() (string, []byte)
	// Restore replaces the current state from a previously taken snapshot
	// (range spec, data).
	Restore(string, []byte) error
}
// AllocatorFactory constructs an Interface managing max items, described by rangeSpec.
type AllocatorFactory func(max int, rangeSpec string) Interface

View File

@@ -0,0 +1,64 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocator
import "math/big"
// countBits returns the number of set bits in n
func countBits(n *big.Int) int {
var count int = 0
for _, b := range n.Bytes() {
count += int(bitCounts[b])
}
return count
}
// bitCounts is all of the bits counted for each number between 0-255
var bitCounts = []int8{
0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7,
5, 6, 6, 7, 6, 7, 7, 8,
}

View File

@@ -0,0 +1,33 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package allocator
import "testing"
// TestBitCount cross-checks every entry of the bitCounts table against a
// population count computed directly from the index by shifting.
func TestBitCount(t *testing.T) {
	for i, c := range bitCounts {
		actual := 0
		for n := i; n != 0; n >>= 1 {
			actual += n & 1
		}
		if actual != int(c) {
			t.Errorf("%d should have %d bits but recorded as %d", i, actual, c)
		}
	}
}

View File

@@ -19,10 +19,10 @@ package ipallocator
import (
"errors"
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator"
"math/big"
"math/rand"
"net"
"sync"
)
// Interface manages the allocation of IP addresses out of a range. Interface
@@ -33,20 +33,11 @@ type Interface interface {
Release(net.IP) error
}
// Snapshottable is an Interface that can be snapshotted and restored. Snapshottable
// should be threadsafe.
type Snapshottable interface {
Interface
Snapshot() (*net.IPNet, []byte)
Restore(*net.IPNet, []byte) error
}
var (
ErrFull = errors.New("range is full")
ErrNotInRange = errors.New("provided IP is not in the valid range")
ErrAllocated = errors.New("provided IP is already allocated")
ErrMismatchedNetwork = errors.New("the provided network does not match the current range")
ErrAllocationDisabled = errors.New("IP addresses cannot be allocated at this time")
ErrFull = errors.New("range is full")
ErrNotInRange = errors.New("provided IP is not in the valid range")
ErrAllocated = errors.New("provided IP is already allocated")
ErrMismatchedNetwork = errors.New("the provided network does not match the current range")
)
// Range is a contiguous block of IPs that can be allocated atomically.
@@ -64,52 +55,39 @@ var (
// | |
// r.base r.base + r.max
// | |
// first bit of r.allocated last bit of r.allocated
//
// If an address is taken, the bit at offset:
//
// bit offset := IP - r.base
//
// is set to one. r.count is always equal to the number of set bits and
// can be recalculated at any time by counting the set bits in r.allocated.
//
// TODO: use RLE and compact the allocator to minimize space.
// offset #0 of r.allocated last offset of r.allocated
type Range struct {
net *net.IPNet
// base is a cached version of the start IP in the CIDR range as a *big.Int
base *big.Int
// strategy is the strategy for choosing the next available IP out of the range
strategy allocateStrategy
// max is the maximum size of the usable addresses in the range
max int
// lock guards the following members
lock sync.Mutex
// count is the number of currently allocated elements in the range
count int
// allocated is a bit array of the allocated ips in the range
allocated *big.Int
alloc allocator.Interface
}
// allocateStrategy is a search strategy in the allocation map for a valid IP.
type allocateStrategy func(allocated *big.Int, max, count int) (int, error)
// NewCIDRRange creates a Range over a net.IPNet.
func NewCIDRRange(cidr *net.IPNet) *Range {
// NewAllocatorCIDRRange creates a Range over a net.IPNet, calling allocatorFactory to construct the backing store.
func NewAllocatorCIDRRange(cidr *net.IPNet, allocatorFactory allocator.AllocatorFactory) *Range {
max := RangeSize(cidr)
base := bigForIP(cidr.IP)
r := Range{
net: cidr,
strategy: randomScanStrategy,
base: base.Add(base, big.NewInt(1)), // don't use the network base
max: maximum(0, int(max-2)), // don't use the network broadcast
rangeSpec := cidr.String()
allocated: big.NewInt(0),
count: 0,
r := Range{
net: cidr,
base: base.Add(base, big.NewInt(1)), // don't use the network base
max: maximum(0, int(max-2)), // don't use the network broadcast,
}
r.alloc = allocatorFactory(r.max, rangeSpec)
return &r
}
// Helper that wraps NewAllocatorCIDRRange, for creating a range backed by an in-memory store.
func NewCIDRRange(cidr *net.IPNet) *Range {
return NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
return allocator.NewAllocationMap(max, rangeSpec)
})
}
func maximum(a, b int) int {
if a > b {
return a
@@ -119,9 +97,7 @@ func maximum(a, b int) int {
// Free returns the count of IP addresses left in the range.
func (r *Range) Free() int {
r.lock.Lock()
defer r.lock.Unlock()
return r.max - r.count
return r.alloc.Free()
}
// Allocate attempts to reserve the provided IP. ErrNotInRange or
@@ -134,30 +110,27 @@ func (r *Range) Allocate(ip net.IP) error {
return ErrNotInRange
}
r.lock.Lock()
defer r.lock.Unlock()
if r.allocated.Bit(offset) == 1 {
allocated, err := r.alloc.Allocate(offset)
if err != nil {
return err
}
if !allocated {
return ErrAllocated
}
r.allocated = r.allocated.SetBit(r.allocated, offset, 1)
r.count++
return nil
}
// AllocateNext reserves one of the IPs from the pool. ErrFull may
// be returned if there are no addresses left.
func (r *Range) AllocateNext() (net.IP, error) {
r.lock.Lock()
defer r.lock.Unlock()
next, err := r.strategy(r.allocated, r.max, r.count)
offset, ok, err := r.alloc.AllocateNext()
if err != nil {
return nil, err
}
r.count++
r.allocated = r.allocated.SetBit(r.allocated, next, 1)
return addIPOffset(r.base, next), nil
if !ok {
return nil, ErrFull
}
return addIPOffset(r.base, offset), nil
}
// Release releases the IP back to the pool. Releasing an
@@ -169,16 +142,7 @@ func (r *Range) Release(ip net.IP) error {
return nil
}
r.lock.Lock()
defer r.lock.Unlock()
if r.allocated.Bit(offset) == 0 {
return nil
}
r.allocated = r.allocated.SetBit(r.allocated, offset, 0)
r.count--
return nil
return r.alloc.Release(offset)
}
// Has returns true if the provided IP is already allocated and a call
@@ -189,31 +153,32 @@ func (r *Range) Has(ip net.IP) bool {
return false
}
r.lock.Lock()
defer r.lock.Unlock()
return r.allocated.Bit(offset) == 1
return r.alloc.Has(offset)
}
// Snapshot saves the current state of the pool.
func (r *Range) Snapshot() (*net.IPNet, []byte) {
r.lock.Lock()
defer r.lock.Unlock()
return r.net, r.allocated.Bytes()
func (r *Range) Snapshot(dst *api.RangeAllocation) error {
snapshottable, ok := r.alloc.(allocator.Snapshottable)
if !ok {
return fmt.Errorf("not a snapshottable allocator")
}
rangeString, data := snapshottable.Snapshot()
dst.Range = rangeString
dst.Data = data
return nil
}
// Restore restores the pool to the previously captured state. ErrMismatchedNetwork
// is returned if the provided IPNet range doesn't exactly match the previous range.
func (r *Range) Restore(net *net.IPNet, data []byte) error {
r.lock.Lock()
defer r.lock.Unlock()
if !net.IP.Equal(r.net.IP) || net.Mask.String() != r.net.Mask.String() {
return ErrMismatchedNetwork
}
r.allocated = big.NewInt(0).SetBytes(data)
r.count = countBits(r.allocated)
snapshottable, ok := r.alloc.(allocator.Snapshottable)
if !ok {
return fmt.Errorf("not a snapshottable allocator")
}
snapshottable.Restore(net.String(), data)
return nil
}
@@ -252,68 +217,6 @@ func calculateIPOffset(base *big.Int, ip net.IP) int {
return int(big.NewInt(0).Sub(bigForIP(ip), base).Int64())
}
// randomScanStrategy chooses a random address from the provided big.Int, and then
// scans forward looking for the next available address (it will wrap the range if
// necessary).
func randomScanStrategy(allocated *big.Int, max, count int) (int, error) {
if count >= max {
return 0, ErrFull
}
offset := rand.Intn(max)
for i := 0; i < max; i++ {
at := (offset + i) % max
if allocated.Bit(at) == 0 {
return at, nil
}
}
return 0, ErrFull
}
// countBits returns the number of set bits in n
func countBits(n *big.Int) int {
var count int = 0
for _, b := range n.Bytes() {
count += int(bitCounts[b])
}
return count
}
// bitCounts is all of the bits counted for each number between 0-255
var bitCounts = []int8{
0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4,
2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5,
3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6,
4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7,
5, 6, 6, 7, 6, 7, 7, 8,
}
// RangeSize returns the size of a range in valid addresses.
func RangeSize(subnet *net.IPNet) int64 {
ones, bits := subnet.Mask.Size()

View File

@@ -20,6 +20,7 @@ import (
"net"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
@@ -142,27 +143,13 @@ func TestAllocateSmall(t *testing.T) {
}
}
if r.count != 2 && r.Free() != 0 && r.max != 2 {
if r.Free() != 0 && r.max != 2 {
t.Fatalf("unexpected range: %v", r)
}
t.Logf("allocated: %v", found)
}
func TestBitCount(t *testing.T) {
for i, c := range bitCounts {
actual := 0
for j := 0; j < 8; j++ {
if ((1 << uint(j)) & i) != 0 {
actual++
}
}
if actual != int(c) {
t.Errorf("%d should have %d bits but recorded as %d", i, actual, c)
}
}
}
func TestRangeSize(t *testing.T) {
testCases := map[string]int64{
"192.168.1.0/24": 256,
@@ -195,7 +182,17 @@ func TestSnapshot(t *testing.T) {
ip = append(ip, n)
}
network, data := r.Snapshot()
var dst api.RangeAllocation
err = r.Snapshot(&dst)
if err != nil {
t.Fatal(err)
}
_, network, err := net.ParseCIDR(dst.Range)
if err != nil {
t.Fatal(err)
}
if !network.IP.Equal(cidr.IP) || network.Mask.String() != cidr.Mask.String() {
t.Fatalf("mismatched networks: %s : %s", network, cidr)
}
@@ -205,11 +202,11 @@ func TestSnapshot(t *testing.T) {
t.Fatal(err)
}
other := NewCIDRRange(otherCidr)
if err := r.Restore(otherCidr, data); err != ErrMismatchedNetwork {
if err := r.Restore(otherCidr, dst.Data); err != ErrMismatchedNetwork {
t.Fatal(err)
}
other = NewCIDRRange(network)
if err := other.Restore(network, data); err != nil {
if err := other.Restore(network, dst.Data); err != nil {
t.Fatal(err)
}

View File

@@ -46,12 +46,12 @@ type Repair struct {
interval time.Duration
registry service.Registry
network *net.IPNet
alloc service.IPRegistry
alloc service.RangeRegistry
}
// NewRepair creates a controller that periodically ensures that all portalIPs are uniquely allocated across the cluster
// and generates informational warnings for a cluster that is not in sync.
func NewRepair(interval time.Duration, registry service.Registry, network *net.IPNet, alloc service.IPRegistry) *Repair {
func NewRepair(interval time.Duration, registry service.Registry, network *net.IPNet, alloc service.RangeRegistry) *Repair {
return &Repair{
interval: interval,
registry: registry,
@@ -71,6 +71,13 @@ func (c *Repair) RunUntil(ch chan struct{}) {
// RunOnce verifies the state of the portal IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
// or if they are executed against different leaders,
// the ordering guarantee required to ensure no IP is allocated twice is violated.
// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
// and the release code must not release services that have had IPs allocated but not yet been created
// See #8295
latest, err := c.alloc.Get()
if err != nil {
return fmt.Errorf("unable to refresh the service IP block: %v", err)
@@ -111,7 +118,10 @@ func (c *Repair) RunOnce() error {
}
}
service.SnapshotRange(latest, r)
err = r.Snapshot(latest)
if err != nil {
return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
}
if err := c.alloc.CreateOrUpdate(latest); err != nil {
return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)

View File

@@ -27,7 +27,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator"
)
type mockIPRegistry struct {
type mockRangeRegistry struct {
getCalled bool
item *api.RangeAllocation
err error
@@ -37,12 +37,12 @@ type mockIPRegistry struct {
updateErr error
}
func (r *mockIPRegistry) Get() (*api.RangeAllocation, error) {
func (r *mockRangeRegistry) Get() (*api.RangeAllocation, error) {
r.getCalled = true
return r.item, r.err
}
func (r *mockIPRegistry) CreateOrUpdate(alloc *api.RangeAllocation) error {
func (r *mockRangeRegistry) CreateOrUpdate(alloc *api.RangeAllocation) error {
r.updateCalled = true
r.updated = alloc
return r.updateErr
@@ -51,7 +51,7 @@ func (r *mockIPRegistry) CreateOrUpdate(alloc *api.RangeAllocation) error {
func TestRepair(t *testing.T) {
registry := registrytest.NewServiceRegistry()
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
ipregistry := &mockIPRegistry{
ipregistry := &mockRangeRegistry{
item: &api.RangeAllocation{},
}
r := NewRepair(0, registry, cidr, ipregistry)
@@ -63,7 +63,7 @@ func TestRepair(t *testing.T) {
t.Errorf("unexpected ipregistry: %#v", ipregistry)
}
ipregistry = &mockIPRegistry{
ipregistry = &mockRangeRegistry{
item: &api.RangeAllocation{},
updateErr: fmt.Errorf("test error"),
}
@@ -77,16 +77,21 @@ func TestRepairEmpty(t *testing.T) {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
previous := ipallocator.NewCIDRRange(cidr)
previous.Allocate(net.ParseIP("192.168.1.10"))
network, data := previous.Snapshot()
var dst api.RangeAllocation
err := previous.Snapshot(&dst)
if err != nil {
t.Fatal(err)
}
registry := registrytest.NewServiceRegistry()
ipregistry := &mockIPRegistry{
ipregistry := &mockRangeRegistry{
item: &api.RangeAllocation{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "1",
},
Range: network.String(),
Data: data,
Range: dst.Range,
Data: dst.Data,
},
}
r := NewRepair(0, registry, cidr, ipregistry)
@@ -105,7 +110,13 @@ func TestRepairEmpty(t *testing.T) {
func TestRepairWithExisting(t *testing.T) {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
previous := ipallocator.NewCIDRRange(cidr)
network, data := previous.Snapshot()
var dst api.RangeAllocation
err := previous.Snapshot(&dst)
if err != nil {
t.Fatal(err)
}
registry := registrytest.NewServiceRegistry()
registry.List = api.ServiceList{
Items: []api.Service{
@@ -130,13 +141,13 @@ func TestRepairWithExisting(t *testing.T) {
},
}
ipregistry := &mockIPRegistry{
ipregistry := &mockRangeRegistry{
item: &api.RangeAllocation{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "1",
},
Range: network.String(),
Data: data,
Range: dst.Range,
Data: dst.Data,
},
}
r := NewRepair(0, registry, cidr, ipregistry)

View File

@@ -16,179 +16,4 @@ limitations under the License.
package etcd
import (
"fmt"
"net"
"sync"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
etcderr "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
)
// Etcd exposes a service.Allocator that is backed by etcd.
// TODO: allow multiple allocations to be tried at once
// TODO: subdivide the keyspace to reduce conflicts
// TODO: investigate issuing a CAS without reading first
type Etcd struct {
lock sync.Mutex
alloc ipallocator.Snapshottable
helper tools.EtcdHelper
last string
}
// Etcd implements ipallocator.Interface and service.IPRegistry
var _ ipallocator.Interface = &Etcd{}
var _ service.IPRegistry = &Etcd{}
const baseKey = "/ranges/serviceips"
// NewEtcd returns a service PortalIP ipallocator that is backed by Etcd and can manage
// persisting the snapshot state of allocation after each allocation is made.
func NewEtcd(alloc ipallocator.Snapshottable, helper tools.EtcdHelper) *Etcd {
return &Etcd{
alloc: alloc,
helper: helper,
}
}
// Allocate attempts to allocate the IP locally and then in etcd.
func (e *Etcd) Allocate(ip net.IP) error {
e.lock.Lock()
defer e.lock.Unlock()
if err := e.alloc.Allocate(ip); err != nil {
return err
}
return e.tryUpdate(func() error {
return e.alloc.Allocate(ip)
})
}
// AllocateNext attempts to allocate the next IP locally and then in etcd.
func (e *Etcd) AllocateNext() (net.IP, error) {
e.lock.Lock()
defer e.lock.Unlock()
ip, err := e.alloc.AllocateNext()
if err != nil {
return nil, err
}
err = e.tryUpdate(func() error {
if err := e.alloc.Allocate(ip); err != nil {
if err != ipallocator.ErrAllocated {
return err
}
// update the ip here
ip, err = e.alloc.AllocateNext()
if err != nil {
return err
}
}
return nil
})
return ip, err
}
// Release attempts to release the provided IP locally and then in etcd.
func (e *Etcd) Release(ip net.IP) error {
e.lock.Lock()
defer e.lock.Unlock()
if err := e.alloc.Release(ip); err != nil {
return err
}
return e.tryUpdate(func() error {
return e.alloc.Release(ip)
})
}
// tryUpdate performs a read-update to persist the latest snapshot state of allocation.
func (e *Etcd) tryUpdate(fn func() error) error {
err := e.helper.GuaranteedUpdate(baseKey, &api.RangeAllocation{}, true,
func(input runtime.Object) (output runtime.Object, ttl uint64, err error) {
existing := input.(*api.RangeAllocation)
if len(existing.ResourceVersion) == 0 {
return nil, 0, ipallocator.ErrAllocationDisabled
}
if existing.ResourceVersion != e.last {
if err := service.RestoreRange(e.alloc, existing); err != nil {
return nil, 0, err
}
if err := fn(); err != nil {
return nil, 0, err
}
}
e.last = existing.ResourceVersion
service.SnapshotRange(existing, e.alloc)
return existing, 0, nil
},
)
return etcderr.InterpretUpdateError(err, "serviceipallocation", "")
}
// Refresh reloads the ipallocator from etcd.
func (e *Etcd) Refresh() error {
e.lock.Lock()
defer e.lock.Unlock()
existing := &api.RangeAllocation{}
if err := e.helper.ExtractObj(baseKey, existing, false); err != nil {
if tools.IsEtcdNotFound(err) {
return ipallocator.ErrAllocationDisabled
}
return etcderr.InterpretGetError(err, "serviceipallocation", "")
}
return service.RestoreRange(e.alloc, existing)
}
// Get returns an api.RangeAllocation that represents the current state in
// etcd. If the key does not exist, the object will have an empty ResourceVersion.
func (e *Etcd) Get() (*api.RangeAllocation, error) {
existing := &api.RangeAllocation{}
if err := e.helper.ExtractObj(baseKey, existing, true); err != nil {
return nil, etcderr.InterpretGetError(err, "serviceipallocation", "")
}
return existing, nil
}
// CreateOrUpdate attempts to update the current etcd state with the provided
// allocation.
func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error {
e.lock.Lock()
defer e.lock.Unlock()
last := ""
err := e.helper.GuaranteedUpdate(baseKey, &api.RangeAllocation{}, true,
func(input runtime.Object) (output runtime.Object, ttl uint64, err error) {
existing := input.(*api.RangeAllocation)
switch {
case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0:
if snapshot.ResourceVersion != existing.ResourceVersion {
return nil, 0, errors.NewConflict("serviceipallocation", "", fmt.Errorf("the provided resource version does not match"))
}
case len(existing.ResourceVersion) != 0:
return nil, 0, errors.NewConflict("serviceipallocation", "", fmt.Errorf("another caller has already initialized the resource"))
}
last = snapshot.ResourceVersion
return snapshot, 0, nil
},
)
if err != nil {
return etcderr.InterpretUpdateError(err, "serviceipallocation", "")
}
err = service.RestoreRange(e.alloc, snapshot)
if err == nil {
e.last = last
}
return err
}
// Keep CI happy; it is unhappy if a directory only contains tests

View File

@@ -22,8 +22,11 @@ import (
"github.com/coreos/go-etcd/etcd"
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator"
allocator_etcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
@@ -37,15 +40,22 @@ func newHelper(t *testing.T) (*tools.FakeEtcdClient, tools.EtcdHelper) {
return fakeEtcdClient, helper
}
func newStorage(t *testing.T) (ipallocator.Interface, *ipallocator.Range, *tools.FakeEtcdClient) {
func newStorage(t *testing.T) (ipallocator.Interface, allocator.Interface, *tools.FakeEtcdClient) {
fakeEtcdClient, h := newHelper(t)
_, cidr, err := net.ParseCIDR("192.168.1.0/24")
if err != nil {
t.Fatal(err)
}
r := ipallocator.NewCIDRRange(cidr)
storage := NewEtcd(r, h)
return storage, r, fakeEtcdClient
var backing allocator.Interface
storage := ipallocator.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec)
backing = mem
etcd := allocator_etcd.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", h)
return etcd
})
return storage, backing, fakeEtcdClient
}
func key() string {
@@ -56,7 +66,7 @@ func key() string {
func TestEmpty(t *testing.T) {
storage, _, ecli := newStorage(t)
ecli.ExpectNotFoundGet(key())
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocationDisabled {
if err := storage.Allocate(net.ParseIP("192.168.1.2")); fmt.Sprintf("%v", err) != "cannot allocate resources of type serviceipallocation at this time" {
t.Fatal(err)
}
}
@@ -85,17 +95,19 @@ func initialObject(ecli *tools.FakeEtcdClient) {
}
func TestStore(t *testing.T) {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
storage, r, ecli := newStorage(t)
initialObject(ecli)
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != nil {
t.Fatal(err)
}
if err := r.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocated {
ok, err := r.Allocate(1)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("Expected allocation to fail")
}
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocated {
t.Fatal(err)
}
@@ -105,29 +117,4 @@ func TestStore(t *testing.T) {
t.Fatalf("%s is empty: %#v", key(), obj)
}
t.Logf("data: %#v", obj.R.Node)
other := ipallocator.NewCIDRRange(cidr)
allocation := &api.RangeAllocation{}
if err := storage.(*Etcd).helper.ExtractObj(key(), allocation, false); err != nil {
t.Fatal(err)
}
if allocation.ResourceVersion != "1" {
t.Fatalf("%#v", allocation)
}
if allocation.Range != "192.168.1.0/24" {
t.Errorf("unexpected stored Range: %s", allocation.Range)
}
if err := other.Restore(cidr, allocation.Data); err != nil {
t.Fatal(err)
}
if !other.Has(net.ParseIP("192.168.1.2")) {
t.Fatalf("could not restore allocated IP: %#v", other)
}
other = ipallocator.NewCIDRRange(cidr)
otherStorage := NewEtcd(other, storage.(*Etcd).helper)
if err := otherStorage.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocated {
t.Fatal(err)
}
}

View File

@@ -0,0 +1,167 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portallocator
import (
"errors"
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
// Interface manages the allocation of ports out of a range. Interface
// should be threadsafe.
type Interface interface {
	// Allocate reserves the given port; errors include out-of-range and
	// already-allocated conditions.
	Allocate(int) error
	// AllocateNext reserves and returns any free port from the range.
	AllocateNext() (int, error)
	// Release returns a previously allocated port to the pool.
	Release(int) error
}
var (
	// ErrFull indicates no ports remain unallocated in the range.
	ErrFull = errors.New("range is full")
	// ErrNotInRange indicates the requested port is outside the range.
	ErrNotInRange = errors.New("provided port is not in the valid range")
	// ErrAllocated indicates the requested port is already reserved.
	ErrAllocated = errors.New("provided port is already allocated")
	// ErrMismatchedNetwork indicates a Restore was attempted with a
	// range different from the one the allocator was created with.
	ErrMismatchedNetwork = errors.New("the provided port range does not match the current port range")
)
// PortAllocator assigns ports out of a fixed util.PortRange, delegating
// the per-offset bookkeeping to a backing allocator.Interface.
type PortAllocator struct {
	portRange util.PortRange      // inclusive range the ports are drawn from
	alloc     allocator.Interface // tracks which offsets within the range are taken
}

// PortAllocator implements Interface and Snapshottable
var _ Interface = &PortAllocator{}
// NewPortAllocatorCustom creates a PortAllocator over a util.PortRange,
// calling allocatorFactory to construct the backing store. The factory
// receives the range size and the range's string representation.
func NewPortAllocatorCustom(pr util.PortRange, allocatorFactory allocator.AllocatorFactory) *PortAllocator {
	return &PortAllocator{
		portRange: pr,
		alloc:     allocatorFactory(pr.Size, pr.String()),
	}
}
// NewPortAllocator creates a PortAllocator over a util.PortRange,
// backed by an in-memory allocation map.
func NewPortAllocator(pr util.PortRange) *PortAllocator {
	factory := func(max int, rangeSpec string) allocator.Interface {
		return allocator.NewAllocationMap(max, rangeSpec)
	}
	return NewPortAllocatorCustom(pr, factory)
}
// Free returns the count of ports left in the range.
func (r *PortAllocator) Free() int {
	return r.alloc.Free()
}
// Allocate attempts to reserve the provided port. It returns
// ErrNotInRange when the port falls outside this allocator's range,
// ErrAllocated when the port has already been reserved, and ErrFull
// when there are no ports left.
func (r *PortAllocator) Allocate(port int) error {
	inRange, offset := r.contains(port)
	if !inRange {
		return ErrNotInRange
	}
	allocated, err := r.alloc.Allocate(offset)
	switch {
	case err != nil:
		return err
	case !allocated:
		return ErrAllocated
	default:
		return nil
	}
}
// AllocateNext reserves one of the ports from the pool. ErrFull may
// be returned if there are no ports left.
func (r *PortAllocator) AllocateNext() (int, error) {
	offset, ok, err := r.alloc.AllocateNext()
	switch {
	case err != nil:
		return 0, err
	case !ok:
		return 0, ErrFull
	}
	// Translate the store's zero-based offset back to a real port number.
	return r.portRange.Base + offset, nil
}
// Release returns the port to the pool. Releasing an unallocated port
// or a port outside the range is a no-op and returns no error.
func (r *PortAllocator) Release(port int) error {
	if inRange, offset := r.contains(port); inRange {
		return r.alloc.Release(offset)
	}
	// TODO: log a warning
	return nil
}
// Has reports whether the provided port is already allocated, i.e.
// whether a call to Allocate(port) would fail with ErrAllocated.
func (r *PortAllocator) Has(port int) bool {
	inRange, offset := r.contains(port)
	return inRange && r.alloc.Has(offset)
}
// Snapshot writes the current state of the pool into dst. It fails if
// the backing store does not support snapshotting.
func (r *PortAllocator) Snapshot(dst *api.RangeAllocation) error {
	s, ok := r.alloc.(allocator.Snapshottable)
	if !ok {
		return fmt.Errorf("not a snapshottable allocator")
	}
	dst.Range, dst.Data = s.Snapshot()
	return nil
}
// Restore resets the pool to a previously captured state.
// ErrMismatchedNetwork is returned if the provided port range doesn't
// exactly match the range this allocator was created with.
func (r *PortAllocator) Restore(pr util.PortRange, data []byte) error {
	rangeSpec := pr.String()
	if rangeSpec != r.portRange.String() {
		return ErrMismatchedNetwork
	}
	s, ok := r.alloc.(allocator.Snapshottable)
	if !ok {
		return fmt.Errorf("not a snapshottable allocator")
	}
	s.Restore(rangeSpec, data)
	return nil
}
// contains returns true and the zero-based offset within the range if
// the port is in the range, and false and 0 otherwise.
func (r *PortAllocator) contains(port int) (bool, int) {
	if !r.portRange.Contains(port) {
		return false, 0
	}
	offset := port - r.portRange.Base
	return true, offset
}

View File

@@ -0,0 +1,148 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portallocator
import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"strconv"
)
// TestAllocate exhausts the range with AllocateNext, then exercises
// Release / re-Allocate and the out-of-range / already-allocated error
// cases at both range boundaries.
func TestAllocate(t *testing.T) {
	pr, err := util.ParsePortRange("10000-10200")
	if err != nil {
		t.Fatal(err)
	}
	r := NewPortAllocator(*pr)
	// 10000-10200 inclusive is 201 ports.
	if f := r.Free(); f != 201 {
		t.Errorf("unexpected free %d", f)
	}
	found := util.NewStringSet()
	count := 0
	for r.Free() > 0 {
		p, err := r.AllocateNext()
		if err != nil {
			t.Fatalf("error @ %d: %v", count, err)
		}
		count++
		// p is an int port number: %d, not %s (go vet printf check).
		if !pr.Contains(p) {
			t.Fatalf("allocated %d which is outside of %s", p, pr)
		}
		if found.Has(strconv.Itoa(p)) {
			t.Fatalf("allocated %d twice @ %d", p, count)
		}
		found.Insert(strconv.Itoa(p))
	}
	if _, err := r.AllocateNext(); err != ErrFull {
		t.Fatal(err)
	}

	released := 10005
	if err := r.Release(released); err != nil {
		t.Fatal(err)
	}
	if f := r.Free(); f != 1 {
		t.Errorf("unexpected free %d", f)
	}
	// The only free port must be the one we just released.
	p, err := r.AllocateNext()
	if err != nil {
		t.Fatal(err)
	}
	if released != p {
		t.Errorf("unexpected %d : %d", p, released)
	}

	if err := r.Release(released); err != nil {
		t.Fatal(err)
	}
	if err := r.Allocate(1); err != ErrNotInRange {
		t.Fatal(err)
	}
	if err := r.Allocate(10001); err != ErrAllocated {
		t.Fatal(err)
	}
	if err := r.Allocate(20000); err != ErrNotInRange {
		t.Fatal(err)
	}
	// 10201 is just past the inclusive upper bound.
	if err := r.Allocate(10201); err != ErrNotInRange {
		t.Fatal(err)
	}
	if f := r.Free(); f != 1 {
		t.Errorf("unexpected free %d", f)
	}
	if err := r.Allocate(released); err != nil {
		t.Fatal(err)
	}
	if f := r.Free(); f != 0 {
		t.Errorf("unexpected free %d", f)
	}
}
// TestSnapshot round-trips allocator state through an api.RangeAllocation
// and verifies Restore rejects a mismatched port range.
func TestSnapshot(t *testing.T) {
	pr, err := util.ParsePortRange("10000-10200")
	if err != nil {
		t.Fatal(err)
	}
	r := NewPortAllocator(*pr)
	ports := []int{}
	for i := 0; i < 10; i++ {
		port, err := r.AllocateNext()
		if err != nil {
			t.Fatal(err)
		}
		ports = append(ports, port)
	}

	var dst api.RangeAllocation
	err = r.Snapshot(&dst)
	if err != nil {
		t.Fatal(err)
	}

	pr2, err := util.ParsePortRange(dst.Range)
	if err != nil {
		t.Fatal(err)
	}

	if pr.String() != pr2.String() {
		t.Fatalf("mismatched networks: %s : %s", pr, pr2)
	}

	// Restoring into an allocator built over a different range must fail.
	otherPr, err := util.ParsePortRange("200-300")
	if err != nil {
		t.Fatal(err)
	}
	other := NewPortAllocator(*otherPr)
	if err := r.Restore(*otherPr, dst.Data); err != ErrMismatchedNetwork {
		t.Fatal(err)
	}
	other = NewPortAllocator(*pr2)
	if err := other.Restore(*pr2, dst.Data); err != nil {
		t.Fatal(err)
	}

	for _, n := range ports {
		if !other.Has(n) {
			// n is an int port number: %d, not %s (go vet printf check).
			t.Errorf("restored range does not have %d", n)
		}
	}
	if other.Free() != r.Free() {
		t.Errorf("counts do not match: %d", other.Free())
	}
}

View File

@@ -0,0 +1,114 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/portallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
// See ipallocator/controller/repair.go; this is a copy for ports.
// Repair periodically rebuilds the node-port allocation state from the
// current list of services and persists it, surfacing warnings for
// duplicate or out-of-range ports.
type Repair struct {
	interval  time.Duration         // how often RunOnce fires when driven by RunUntil
	registry  service.Registry      // source of the current service list
	portRange util.PortRange        // the range node ports must fall within
	alloc     service.RangeRegistry // persisted allocation snapshot
}
// NewRepair creates a controller that periodically ensures that all ports are uniquely allocated across the cluster
// and generates informational warnings for a cluster that is not in sync.
// interval is the period between repair passes, registry lists the services,
// portRange bounds valid node ports, and alloc persists the snapshot.
func NewRepair(interval time.Duration, registry service.Registry, portRange util.PortRange, alloc service.RangeRegistry) *Repair {
	return &Repair{
		interval:  interval,
		registry:  registry,
		portRange: portRange,
		alloc:     alloc,
	}
}
// RunUntil runs the repair loop every interval until the provided
// channel is closed; individual pass errors are reported, not fatal.
func (c *Repair) RunUntil(ch chan struct{}) {
	runOnce := func() {
		if err := c.RunOnce(); err != nil {
			util.HandleError(err)
		}
	}
	util.Until(runOnce, c.interval, ch)
}
// RunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no port is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had ports allocated but not yet been created
	// See #8295

	// Fetch the persisted snapshot first (ordering matters; see note above).
	latest, err := c.alloc.Get()
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}

	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx)
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}

	// Rebuild an in-memory allocator from scratch and replay every node
	// port currently referenced by any service.
	r := portallocator.NewPortAllocator(c.portRange)
	for i := range list.Items {
		svc := &list.Items[i]
		ports := service.CollectServiceNodePorts(svc)
		if len(ports) == 0 {
			continue
		}
		for _, port := range ports {
			switch err := r.Allocate(port); err {
			case nil:
			case portallocator.ErrAllocated:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s was assigned to multiple services; please recreate", port, svc.Name, svc.Namespace))
			case portallocator.ErrNotInRange:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %v; please recreate", port, svc.Name, svc.Namespace, c.portRange))
			case portallocator.ErrFull:
				// TODO: send event
				return fmt.Errorf("the port range %v is full; you must widen the port range in order to create new services", c.portRange)
			default:
				return fmt.Errorf("unable to allocate port %d for service %s/%s due to an unknown error, exiting: %v", port, svc.Name, svc.Namespace, err)
			}
		}
	}

	// Persist the rebuilt state back over the stored snapshot.
	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}

	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}
	return nil
}

View File

@@ -0,0 +1,117 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portallocator
// Encapsulates the semantics of a port allocation 'transaction':
// It is better to leak ports than to double-allocate them,
// so we allocate immediately, but defer release.
// On commit we best-effort release the deferred releases.
// On rollback we best-effort release any allocations we did.
//
// Pattern for use:
//   op := StartOperation(...)
//   defer op.Finish()
//   ...
//   write(updatedOwner)
//   op.Commit()
// portAllocationOperation tracks a set of port allocations and deferred
// releases so they can be committed or rolled back as a unit.
type portAllocationOperation struct {
	pa              Interface // the allocator being operated on
	allocated       []int     // ports allocated so far; released by Rollback
	releaseDeferred []int     // ports to release only when Commit runs
	shouldRollback  bool      // cleared by Commit; Finish rolls back while set
}
// StartOperation creates a portAllocationOperation over pa, tracking a
// set of allocations & releases. It is armed for rollback until Commit.
func StartOperation(pa Interface) *portAllocationOperation {
	return &portAllocationOperation{
		pa:              pa,
		allocated:       []int{},
		releaseDeferred: []int{},
		shouldRollback:  true,
	}
}
// Finish rolls the operation back unless Commit() already ran (which
// sets shouldRollback = false). Call from a defer block.
func (op *portAllocationOperation) Finish() {
	if op.shouldRollback {
		op.Rollback()
	}
}
// Rollback best-effort releases every port this operation allocated.
// It returns nil on full success, otherwise the list of release errors.
func (op *portAllocationOperation) Rollback() []error {
	var errs []error
	for _, port := range op.allocated {
		if err := op.pa.Release(port); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errs
}
// Commit best-effort performs the deferred releases and disarms
// rollback. Even when releases fail we do not roll back: over-allocation
// is the safe failure mode and is repaired by eventual reconciliation /
// restart. Commit should be called _after_ the owner is written.
func (op *portAllocationOperation) Commit() []error {
	var errs []error
	for _, port := range op.releaseDeferred {
		if err := op.pa.Release(port); err != nil {
			errs = append(errs, err)
		}
	}

	// Even on error, we don't rollback; problems should be fixed by an
	// eventual reconciliation / restart.
	op.shouldRollback = false

	if len(errs) == 0 {
		return nil
	}
	return errs
}
// Allocate reserves the specific port and records it for future rollback.
func (op *portAllocationOperation) Allocate(port int) error {
	if err := op.pa.Allocate(port); err != nil {
		return err
	}
	op.allocated = append(op.allocated, port)
	return nil
}
// AllocateNext reserves the next free port and records it for future rollback.
func (op *portAllocationOperation) AllocateNext() (int, error) {
	port, err := op.pa.AllocateNext()
	if err != nil {
		return port, err
	}
	op.allocated = append(op.allocated, port)
	return port, nil
}
// ReleaseDeferred marks a port so that it will be released if this
// operation Commits; it is not released on Rollback.
func (op *portAllocationOperation) ReleaseDeferred(port int) {
	op.releaseDeferred = append(op.releaseDeferred, port)
}

View File

@@ -17,12 +17,9 @@ limitations under the License.
package service
import (
"net"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
)
@@ -36,8 +33,9 @@ type Registry interface {
WatchServices(ctx api.Context, labels labels.Selector, fields fields.Selector, resourceVersion string) (watch.Interface, error)
}
// IPRegistry is a registry that can retrieve or persist a RangeAllocation object.
type IPRegistry interface {
// TODO: Move to a general location (as other components may need allocation in future; it's not service specific)
// RangeRegistry is a registry that can retrieve or persist a RangeAllocation object.
type RangeRegistry interface {
// Get returns the latest allocation, an empty object if no allocation has been made,
// or an error if the allocation could not be retrieved.
Get() (*api.RangeAllocation, error)
@@ -45,19 +43,3 @@ type IPRegistry interface {
// has occured since the item was last created.
CreateOrUpdate(*api.RangeAllocation) error
}
// RestoreRange updates a snapshottable ipallocator from a RangeAllocation
func RestoreRange(dst ipallocator.Snapshottable, src *api.RangeAllocation) error {
_, network, err := net.ParseCIDR(src.Range)
if err != nil {
return err
}
return dst.Restore(network, src.Data)
}
// SnapshotRange updates a RangeAllocation to match a snapshottable ipallocator
func SnapshotRange(dst *api.RangeAllocation, src ipallocator.Snapshottable) {
network, data := src.Snapshot()
dst.Range = network.String()
dst.Data = data
}

View File

@@ -33,30 +33,34 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/endpoint"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/portallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/golang/glog"
)
// REST adapts a service registry into apiserver's RESTStorage model.
type REST struct {
registry Registry
machines minion.Registry
endpoints endpoint.Registry
portals ipallocator.Interface
clusterName string
registry Registry
machines minion.Registry
endpoints endpoint.Registry
portals ipallocator.Interface
serviceNodePorts portallocator.Interface
clusterName string
}
// NewStorage returns a new REST.
func NewStorage(registry Registry, machines minion.Registry, endpoints endpoint.Registry, portals ipallocator.Interface,
clusterName string) *REST {
serviceNodePorts portallocator.Interface, clusterName string) *REST {
return &REST{
registry: registry,
machines: machines,
endpoints: endpoints,
portals: portals,
clusterName: clusterName,
registry: registry,
machines: machines,
endpoints: endpoints,
portals: portals,
serviceNodePorts: serviceNodePorts,
clusterName: clusterName,
}
}
@@ -76,6 +80,9 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
}
}()
nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
defer nodePortOp.Finish()
if api.IsServiceIPRequested(service) {
// Allocate next available.
ip, err := rs.portals.AllocateNext()
@@ -94,12 +101,37 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err
releaseServiceIP = true
}
assignNodePorts := shouldAssignNodePorts(service)
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
if servicePort.NodePort != 0 {
err := nodePortOp.Allocate(servicePort.NodePort)
if err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", servicePort.NodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
return nil, errors.NewInvalid("Service", service.Name, el)
}
} else if assignNodePorts {
nodePort, err := nodePortOp.AllocateNext()
if err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", servicePort.NodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
return nil, errors.NewInvalid("Service", service.Name, el)
}
servicePort.NodePort = nodePort
}
}
out, err := rs.registry.CreateService(ctx, service)
if err != nil {
err = rest.CheckGeneratedNameError(rest.Services, err, service)
}
if err == nil {
el := nodePortOp.Commit()
if el != nil {
// these should be caught by an eventual reconciliation / restart
glog.Errorf("error(s) committing service node-ports changes: %v", el)
}
releaseServiceIP = false
}
@@ -111,10 +143,25 @@ func (rs *REST) Delete(ctx api.Context, id string) (runtime.Object, error) {
if err != nil {
return nil, err
}
err = rs.registry.DeleteService(ctx, id)
if err != nil {
return nil, err
}
if api.IsServiceIPSet(service) {
rs.portals.Release(net.ParseIP(service.Spec.PortalIP))
}
return &api.Status{Status: api.StatusSuccess}, rs.registry.DeleteService(ctx, id)
for _, nodePort := range CollectServiceNodePorts(service) {
err := rs.serviceNodePorts.Release(nodePort)
if err != nil {
// these should be caught by an eventual reconciliation / restart
glog.Errorf("Error releasing service %s node port %d: %v", service.Name, nodePort, err)
}
}
return &api.Status{Status: api.StatusSuccess}, nil
}
func (rs *REST) Get(ctx api.Context, id string) (runtime.Object, error) {
@@ -170,7 +217,70 @@ func (rs *REST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, boo
if errs := validation.ValidateServiceUpdate(oldService, service); len(errs) > 0 {
return nil, false, errors.NewInvalid("service", service.Name, errs)
}
nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
defer nodePortOp.Finish()
assignNodePorts := shouldAssignNodePorts(service)
oldNodePorts := CollectServiceNodePorts(oldService)
newNodePorts := []int{}
if assignNodePorts {
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
nodePort := servicePort.NodePort
if nodePort != 0 {
if !contains(oldNodePorts, nodePort) {
err := nodePortOp.Allocate(nodePort)
if err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", nodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
return nil, false, errors.NewInvalid("Service", service.Name, el)
}
}
} else {
nodePort, err = nodePortOp.AllocateNext()
if err != nil {
el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("nodePort", nodePort, err.Error())}.PrefixIndex(i).Prefix("spec.ports")
return nil, false, errors.NewInvalid("Service", service.Name, el)
}
servicePort.NodePort = nodePort
}
// Detect duplicate node ports; this should have been caught by validation, so we panic
if contains(newNodePorts, nodePort) {
panic("duplicate node port")
}
newNodePorts = append(newNodePorts, nodePort)
}
} else {
// Validate should have validated that nodePort == 0
}
// The comparison loops are O(N^2), but we don't expect N to be huge
// (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot)
for _, oldNodePort := range oldNodePorts {
if !contains(newNodePorts, oldNodePort) {
continue
}
nodePortOp.ReleaseDeferred(oldNodePort)
}
// Remove any LoadBalancerStatus now if Type != LoadBalancer;
// although loadbalancer delete is actually asynchronous, we don't need to expose the user to that complexity.
if service.Spec.Type != api.ServiceTypeLoadBalancer {
service.Status.LoadBalancer = api.LoadBalancerStatus{}
}
out, err := rs.registry.UpdateService(ctx, service)
if err == nil {
el := nodePortOp.Commit()
if el != nil {
// problems should be fixed by an eventual reconciliation / restart
glog.Errorf("error(s) committing NodePorts changes: %v", el)
}
}
return out, false, err
}
@@ -212,3 +322,39 @@ func (rs *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.Rou
}
return nil, nil, fmt.Errorf("no endpoints available for %q", id)
}
// contains reports whether needle occurs in haystack.
// This is O(N), but we expect haystack to be small; so small that a
// linear search is expected to beat anything fancier.
func contains(haystack []int, needle int) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
// CollectServiceNodePorts returns every non-zero NodePort declared in
// the service's ports, in declaration order. The result is never nil.
func CollectServiceNodePorts(service *api.Service) []int {
	servicePorts := []int{}
	for i := range service.Spec.Ports {
		if nodePort := service.Spec.Ports[i].NodePort; nodePort != 0 {
			servicePorts = append(servicePorts, nodePort)
		}
	}
	return servicePorts
}
// shouldAssignNodePorts reports whether services of this type get node
// ports assigned: LoadBalancer and NodePort do, ClusterIP does not.
// Unknown types are logged and treated as not needing node ports.
func shouldAssignNodePorts(service *api.Service) bool {
	switch service.Spec.Type {
	case api.ServiceTypeLoadBalancer, api.ServiceTypeNodePort:
		return true
	case api.ServiceTypeClusterIP:
		return false
	default:
		glog.Errorf("Unknown service type: %v", service.Spec.Type)
		return false
	}
}

View File

@@ -30,6 +30,8 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/registrytest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/portallocator"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
func NewTestREST(t *testing.T, endpoints *api.EndpointsList) (*REST, *registrytest.ServiceRegistry) {
@@ -40,7 +42,12 @@ func NewTestREST(t *testing.T, endpoints *api.EndpointsList) (*REST, *registryte
}
nodeRegistry := registrytest.NewMinionRegistry(machines, api.NodeResources{})
r := ipallocator.NewCIDRRange(makeIPNet(t))
storage := NewStorage(registry, nodeRegistry, endpointRegistry, r, "kubernetes")
portRange := util.PortRange{Base: 30000, Size: 1000}
portAllocator := portallocator.NewPortAllocator(portRange)
storage := NewStorage(registry, nodeRegistry, endpointRegistry, r, portAllocator, "kubernetes")
return storage, registry
}
@@ -68,6 +75,7 @@ func TestServiceRegistryCreate(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -109,6 +117,7 @@ func TestServiceStorageValidatesCreate(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -120,6 +129,7 @@ func TestServiceStorageValidatesCreate(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Protocol: api.ProtocolTCP,
}},
@@ -162,6 +172,7 @@ func TestServiceRegistryUpdate(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz2"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -205,6 +216,7 @@ func TestServiceStorageValidatesUpdate(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -216,6 +228,7 @@ func TestServiceStorageValidatesUpdate(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"ThisSelectorFailsValidation": "ok"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -240,9 +253,9 @@ func TestServiceRegistryExternalService(t *testing.T) {
svc := &api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
CreateExternalLoadBalancer: true,
SessionAffinity: api.ServiceAffinityNone,
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeLoadBalancer,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -270,6 +283,7 @@ func TestServiceRegistryDelete(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -289,9 +303,9 @@ func TestServiceRegistryDeleteExternal(t *testing.T) {
svc := &api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
CreateExternalLoadBalancer: true,
SessionAffinity: api.ServiceAffinityNone,
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeLoadBalancer,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -313,9 +327,9 @@ func TestServiceRegistryUpdateExternalService(t *testing.T) {
svc1 := &api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
CreateExternalLoadBalancer: false,
SessionAffinity: api.ServiceAffinityNone,
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -328,7 +342,7 @@ func TestServiceRegistryUpdateExternalService(t *testing.T) {
// Modify load balancer to be external.
svc2 := deepCloneService(svc1)
svc2.Spec.CreateExternalLoadBalancer = true
svc2.Spec.Type = api.ServiceTypeLoadBalancer
if _, _, err := storage.Update(ctx, svc2); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@@ -349,9 +363,9 @@ func TestServiceRegistryUpdateMultiPortExternalService(t *testing.T) {
svc1 := &api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
CreateExternalLoadBalancer: true,
SessionAffinity: api.ServiceAffinityNone,
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeLoadBalancer,
Ports: []api.ServicePort{{
Name: "p",
Port: 6502,
@@ -491,6 +505,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -512,6 +527,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -541,6 +557,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) {
Selector: map[string]string{"bar": "baz"},
PortalIP: testIP,
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -566,6 +583,7 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -582,13 +600,17 @@ func TestServiceRegistryIPReallocation(t *testing.T) {
t.Errorf("Unexpected PortalIP: %s", created_service_1.Spec.PortalIP)
}
rest.Delete(ctx, created_service_1.Name)
_, err := rest.Delete(ctx, created_service_1.Name)
if err != nil {
t.Errorf("Unexpected error deleting service: %v", err)
}
svc2 := &api.Service{
ObjectMeta: api.ObjectMeta{Name: "bar"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -614,6 +636,7 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -649,15 +672,15 @@ func TestServiceRegistryIPUpdate(t *testing.T) {
}
}
func TestServiceRegistryIPExternalLoadBalancer(t *testing.T) {
func TestServiceRegistryIPLoadBalancer(t *testing.T) {
rest, _ := NewTestREST(t, nil)
svc := &api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
CreateExternalLoadBalancer: true,
SessionAffinity: api.ServiceAffinityNone,
Selector: map[string]string{"bar": "baz"},
SessionAffinity: api.ServiceAffinityNone,
Type: api.ServiceTypeLoadBalancer,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -730,6 +753,7 @@ func TestCreate(t *testing.T) {
Selector: map[string]string{"bar": "baz"},
PortalIP: "None",
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,
@@ -746,6 +770,7 @@ func TestCreate(t *testing.T) {
Selector: map[string]string{"bar": "baz"},
PortalIP: "invalid",
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
Port: 6502,
Protocol: api.ProtocolTCP,

View File

@@ -28,6 +28,13 @@ import (
"github.com/golang/glog"
)
// RulePosition holds the iptables flag selecting where a rule is placed
// within a chain.
type RulePosition string

const (
	// Prepend inserts the rule at the head of the chain ("-I").
	Prepend RulePosition = "-I"
	// Append adds the rule at the tail of the chain ("-A").
	Append RulePosition = "-A"
)
// An injectable interface for running iptables commands. Implementations must be goroutine-safe.
type Interface interface {
// EnsureChain checks if the specified chain exists and, if not, creates it. If the chain existed, return true.
@@ -37,7 +44,7 @@ type Interface interface {
// DeleteChain deletes the specified chain. If the chain did not exist, return error.
DeleteChain(table Table, chain Chain) error
// EnsureRule checks if the specified rule is present and, if not, creates it. If the rule existed, return true.
EnsureRule(table Table, chain Chain, args ...string) (bool, error)
EnsureRule(position RulePosition, table Table, chain Chain, args ...string) (bool, error)
// DeleteRule checks if the specified rule is present and, if so, deletes it.
DeleteRule(table Table, chain Chain, args ...string) error
// IsIpv6 returns true if this is managing ipv6 tables
@@ -126,7 +133,7 @@ func (runner *runner) DeleteChain(table Table, chain Chain) error {
}
// EnsureRule is part of Interface.
func (runner *runner) EnsureRule(table Table, chain Chain, args ...string) (bool, error) {
func (runner *runner) EnsureRule(position RulePosition, table Table, chain Chain, args ...string) (bool, error) {
fullArgs := makeFullArgs(table, chain, args...)
runner.mu.Lock()
@@ -139,7 +146,7 @@ func (runner *runner) EnsureRule(table Table, chain Chain, args ...string) (bool
if exists {
return true, nil
}
out, err := runner.run(opAppendRule, fullArgs)
out, err := runner.run(operation(position), fullArgs)
if err != nil {
return false, fmt.Errorf("error appending rule: %v: %s", err, out)
}

View File

@@ -176,7 +176,7 @@ func TestEnsureRuleAlreadyExists(t *testing.T) {
},
}
runner := New(&fexec, ProtocolIpv4)
exists, err := runner.EnsureRule(TableNAT, ChainOutput, "abc", "123")
exists, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123")
if err != nil {
t.Errorf("expected success, got %v", err)
}
@@ -212,7 +212,7 @@ func TestEnsureRuleNew(t *testing.T) {
},
}
runner := New(&fexec, ProtocolIpv4)
exists, err := runner.EnsureRule(TableNAT, ChainOutput, "abc", "123")
exists, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123")
if err != nil {
t.Errorf("expected success, got %v", err)
}
@@ -245,7 +245,7 @@ func TestEnsureRuleErrorChecking(t *testing.T) {
},
}
runner := New(&fexec, ProtocolIpv4)
_, err := runner.EnsureRule(TableNAT, ChainOutput, "abc", "123")
_, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123")
if err == nil {
t.Errorf("expected failure")
}
@@ -275,7 +275,7 @@ func TestEnsureRuleErrorCreating(t *testing.T) {
},
}
runner := New(&fexec, ProtocolIpv4)
_, err := runner.EnsureRule(TableNAT, ChainOutput, "abc", "123")
_, err := runner.EnsureRule(Append, TableNAT, ChainOutput, "abc", "123")
if err == nil {
t.Errorf("expected failure")
}

View File

@@ -19,20 +19,27 @@ package e2e
import (
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// ServiceNodePortRange should match whatever the default/configured
// node-port range is. Per the --service-node-ports flag help, the range
// "30000-32767" is inclusive at both ends, i.e. 32767-30000+1 = 2768 ports;
// Size: 2767 would wrongly exclude the top port 32767.
var ServiceNodePortRange = util.PortRange{Base: 30000, Size: 2768}
var _ = Describe("Services", func() {
var c *client.Client
// Use these in tests. They're unique for each test to prevent name collisions.
@@ -252,31 +259,76 @@ var _ = Describe("Services", func() {
}, 240.0)
It("should be able to create a functioning external load balancer", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping service external load balancer test; uses createExternalLoadBalancer, a (gce|gke) feature"))
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping service external load balancer test; uses ServiceTypeLoadBalancer, a (gce|gke|aws) feature"))
return
}
serviceName := "external-lb-test"
ns := namespaces[0]
labels := map[string]string{
"key0": "value0",
}
service := &api.Service{
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Spec: api.ServiceSpec{
Selector: labels,
Ports: []api.ServicePort{{
Port: 80,
TargetPort: util.NewIntOrStringFromInt(80),
}},
CreateExternalLoadBalancer: true,
},
}
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeLoadBalancer
By("creating service " + serviceName + " with external load balancer in namespace " + ns)
result, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
// Wait for the load balancer to be created asynchronously, which is
// currently indicated by ingress point(s) being added to the status.
result, err = waitForLoadBalancerIngress(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(result.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected number (%v) of ingress points for externally load balanced service: %v", result.Status.LoadBalancer.Ingress, result)
}
ingress := result.Status.LoadBalancer.Ingress[0]
if len(result.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", result)
}
port := result.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", result)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for LoadBalancer service: %v", result)
}
By("creating pod to be part of service " + serviceName)
t.CreateWebserverPod()
By("hitting the pod through the service's NodePort")
testReachable(pickMinionIP(c), port.NodePort)
By("hitting the pod through the service's external load balancer")
testLoadBalancerReachable(ingress, 80)
})
It("should be able to create a functioning NodePort service", func() {
serviceName := "nodeportservice-test"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
result, err := c.Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
defer func(ns, serviceName string) { // clean up when we're done
@@ -285,71 +337,440 @@ var _ = Describe("Services", func() {
Expect(err).NotTo(HaveOccurred())
}(ns, serviceName)
// Wait for the load balancer to be created asynchronously, which is
// currently indicated by a public IP address being added to the spec.
result, err = waitForPublicIPs(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(result.Spec.PublicIPs) != 1 {
Failf("got unexpected number (%d) of public IPs for externally load balanced service: %v", result.Spec.PublicIPs, result)
if len(result.Spec.Ports) != 1 {
Failf("got unexpected number (%d) of Ports for NodePort service: %v", len(result.Spec.Ports), result)
}
ip := result.Spec.PublicIPs[0]
port := result.Spec.Ports[0].Port
pod := &api.Pod{
TypeMeta: api.TypeMeta{
Kind: "Pod",
APIVersion: latest.Version,
},
ObjectMeta: api.ObjectMeta{
Name: "elb-test-" + string(util.NewUUID()),
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "webserver",
Image: "gcr.io/google_containers/test-webserver",
},
},
},
nodePort := result.Spec.Ports[0].NodePort
if nodePort == 0 {
Failf("got unexpected nodePort (%d) on Ports[0] for NodePort service: %v", nodePort, result)
}
if !ServiceNodePortRange.Contains(nodePort) {
Failf("got unexpected (out-of-range) port for NodePort service: %v", result)
}
By("creating pod to be part of service " + serviceName)
podClient := c.Pods(ns)
defer func() {
By("deleting pod " + pod.Name)
defer GinkgoRecover()
podClient.Delete(pod.Name, nil)
}()
if _, err := podClient.Create(pod); err != nil {
Failf("Failed to create pod %s: %v", pod.Name, err)
}
expectNoError(waitForPodRunningInNamespace(c, pod.Name, ns))
t.CreateWebserverPod()
By("hitting the pod through the service's external load balancer")
var resp *http.Response
for t := time.Now(); time.Since(t) < podStartTimeout; time.Sleep(5 * time.Second) {
resp, err = http.Get(fmt.Sprintf("http://%s:%d", ip, port))
if err == nil {
By("hitting the pod through the service's NodePort")
ip := pickMinionIP(c)
testReachable(ip, nodePort)
})
It("should be able to change the type and nodeport settings of a service", func() {
serviceName := "mutability-service-test"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
By("creating service " + serviceName + " with type unspecified in namespace " + t.Namespace)
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeClusterIP {
Failf("got unexpected Spec.Type for default service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for default service: %v", service)
}
port := service.Spec.Ports[0]
if port.NodePort != 0 {
Failf("got unexpected Spec.Ports[0].nodePort for default service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for default service: %v", service)
}
By("creating pod to be part of service " + t.ServiceName)
t.CreateWebserverPod()
By("changing service " + serviceName + " to type=NodePort")
service.Spec.Type = api.ServiceTypeNodePort
service, err = c.Services(ns).Update(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for NodePort service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for NodePort service: %v", service)
}
port = service.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for NodePort service: %v", service)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for NodePort service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
}
By("hitting the pod through the service's NodePort")
ip := pickMinionIP(c)
nodePort1 := port.NodePort // Save for later!
testReachable(ip, nodePort1)
By("changing service " + serviceName + " to type=LoadBalancer")
service.Spec.Type = api.ServiceTypeLoadBalancer
service, err = c.Services(ns).Update(service)
Expect(err).NotTo(HaveOccurred())
// Wait for the load balancer to be created asynchronously
service, err = waitForLoadBalancerIngress(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeLoadBalancer {
Failf("got unexpected Spec.Type for LoadBalancer service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", service)
}
port = service.Spec.Ports[0]
if port.NodePort != nodePort1 {
Failf("got unexpected Spec.Ports[0].nodePort for LoadBalancer service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
}
ingress1 := service.Status.LoadBalancer.Ingress[0]
if ingress1.IP == "" && ingress1.Hostname == "" {
Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
}
By("hitting the pod through the service's NodePort")
ip = pickMinionIP(c)
testReachable(ip, nodePort1)
By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress1, 80)
By("changing service " + serviceName + " update NodePort")
nodePort2 := nodePort1 - 1
if !ServiceNodePortRange.Contains(nodePort2) {
//Check for (unlikely) assignment at bottom of range
nodePort2 = nodePort1 + 1
}
service.Spec.Ports[0].NodePort = nodePort2
service, err = c.Services(ns).Update(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeLoadBalancer {
Failf("got unexpected Spec.Type for updated-NodePort service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for updated-NodePort service: %v", service)
}
port = service.Spec.Ports[0]
if port.NodePort != nodePort2 {
Failf("got unexpected Spec.Ports[0].nodePort for NodePort service: %v", service)
}
if len(service.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
}
ingress2 := service.Status.LoadBalancer.Ingress[0]
// TODO: This is a problem on AWS; we can't just always be changing the LB
Expect(ingress1).To(Equal(ingress2))
By("hitting the pod through the service's updated NodePort")
testReachable(ip, nodePort2)
By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress2, 80)
By("checking the old NodePort is closed")
testNotReachable(ip, nodePort1)
By("changing service " + serviceName + " back to type=ClusterIP")
service.Spec.Type = api.ServiceTypeClusterIP
service, err = c.Services(ns).Update(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeClusterIP {
Failf("got unexpected Spec.Type for back-to-ClusterIP service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for back-to-ClusterIP service: %v", service)
}
port = service.Spec.Ports[0]
if port.NodePort != 0 {
Failf("got unexpected Spec.Ports[0].nodePort for back-to-ClusterIP service: %v", service)
}
// Wait for the load balancer to be destroyed asynchronously
service, err = waitForLoadBalancerDestroy(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for back-to-ClusterIP service: %v", service)
}
By("checking the NodePort (original) is closed")
ip = pickMinionIP(c)
testNotReachable(ip, nodePort1)
By("checking the NodePort (updated) is closed")
ip = pickMinionIP(c)
testNotReachable(ip, nodePort2)
By("checking the LoadBalancer is closed")
testLoadBalancerNotReachable(ingress2, 80)
})
It("should release the load balancer when Type goes from LoadBalancer -> NodePort", func() {
serviceName := "service-release-lb"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeLoadBalancer
By("creating service " + serviceName + " with type LoadBalancer")
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
By("creating pod to be part of service " + t.ServiceName)
t.CreateWebserverPod()
if service.Spec.Type != api.ServiceTypeLoadBalancer {
Failf("got unexpected Spec.Type for LoadBalancer service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for LoadBalancer service: %v", service)
}
nodePort := service.Spec.Ports[0].NodePort
if nodePort == 0 {
Failf("got unexpected Spec.Ports[0].NodePort for LoadBalancer service: %v", service)
}
// Wait for the load balancer to be created asynchronously
service, err = waitForLoadBalancerIngress(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(service.Status.LoadBalancer.Ingress) != 1 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for LoadBalancer service: %v", service)
}
ingress := service.Status.LoadBalancer.Ingress[0]
if ingress.IP == "" && ingress.Hostname == "" {
Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
}
By("hitting the pod through the service's NodePort")
ip := pickMinionIP(c)
testReachable(ip, nodePort)
By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress, 80)
By("changing service " + serviceName + " to type=NodePort")
service.Spec.Type = api.ServiceTypeNodePort
service, err = c.Services(ns).Update(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for NodePort service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for NodePort service: %v", service)
}
if service.Spec.Ports[0].NodePort != nodePort {
Failf("got unexpected Spec.Ports[0].NodePort for NodePort service: %v", service)
}
// Wait for the load balancer to be created asynchronously
service, err = waitForLoadBalancerDestroy(c, serviceName, ns)
Expect(err).NotTo(HaveOccurred())
if len(service.Status.LoadBalancer.Ingress) != 0 {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
}
By("hitting the pod through the service's NodePort")
testReachable(ip, nodePort)
By("checking the LoadBalancer is closed")
testLoadBalancerNotReachable(ingress, 80)
})
It("should prevent NodePort collisions", func() {
serviceName := "nodeport-collision"
serviceName2 := serviceName + "2"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
result, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
if result.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for new service: %v", result)
}
if len(result.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for new service: %v", result)
}
port := result.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result)
}
By("creating service " + serviceName + " with conflicting NodePort")
service2 := t.BuildServiceSpec()
service2.Name = serviceName2
service2.Spec.Type = api.ServiceTypeNodePort
service2.Spec.Ports[0].NodePort = port.NodePort
By("creating service " + serviceName2 + " with conflicting NodePort")
result2, err := t.CreateService(service2)
if err == nil {
Failf("Created service with conflicting NodePort: %v", result2)
}
expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d': provided port is already allocated", serviceName2, port.NodePort)
Expect(fmt.Sprintf("%v", err)).To(Equal(expectedErr))
By("deleting original service " + serviceName + " with type NodePort in namespace " + ns)
err = t.DeleteService(serviceName)
Expect(err).NotTo(HaveOccurred())
By("creating service " + serviceName2 + " with no-longer-conflicting NodePort")
_, err = t.CreateService(service2)
Expect(err).NotTo(HaveOccurred())
})
It("should check NodePort out-of-range", func() {
serviceName := "nodeport-range-test"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for new service: %v", service)
}
port := service.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for new service: %v", service)
}
outOfRangeNodePort := 0
for {
outOfRangeNodePort = 1 + rand.Intn(65535)
if !ServiceNodePortRange.Contains(outOfRangeNodePort) {
break
}
}
Expect(err).NotTo(HaveOccurred())
defer resp.Body.Close()
By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
service.Spec.Ports[0].NodePort = outOfRangeNodePort
result, err := t.Client.Services(t.Namespace).Update(service)
if err == nil {
Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
}
expectedErr := fmt.Sprintf("Service \"%s\" is invalid: spec.ports[0].nodePort: invalid value '%d': provided port is not in the valid range", serviceName, outOfRangeNodePort)
Expect(fmt.Sprintf("%v", err)).To(Equal(expectedErr))
body, err := ioutil.ReadAll(resp.Body)
By("deleting original service " + serviceName)
err = t.DeleteService(serviceName)
Expect(err).NotTo(HaveOccurred())
if resp.StatusCode != 200 {
Failf("received non-success return status %q trying to access pod through load balancer; got body: %s", resp.Status, string(body))
By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort))
service = t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
service.Spec.Ports[0].NodePort = outOfRangeNodePort
service, err = t.CreateService(service)
if err == nil {
Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
}
if !strings.Contains(string(body), "test-webserver") {
Failf("received response body without expected substring 'test-webserver': %s", string(body))
Expect(fmt.Sprintf("%v", err)).To(Equal(expectedErr))
})
It("should release NodePorts on delete", func() {
serviceName := "nodeport-reuse"
ns := namespaces[0]
t := NewWebserverTest(c, ns, serviceName)
defer func() {
defer GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
Failf("errors in cleanup: %v", errs)
}
}()
service := t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
if service.Spec.Type != api.ServiceTypeNodePort {
Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
Failf("got unexpected len(Spec.Ports) for new service: %v", service)
}
port := service.Spec.Ports[0]
if port.NodePort == 0 {
Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
}
if !ServiceNodePortRange.Contains(port.NodePort) {
Failf("got unexpected (out-of-range) port for new service: %v", service)
}
port1 := port.NodePort
By("deleting original service " + serviceName)
err = t.DeleteService(serviceName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", port1))
service = t.BuildServiceSpec()
service.Spec.Type = api.ServiceTypeNodePort
service.Spec.Ports[0].NodePort = port1
service, err = t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
})
It("should correctly serve identically named services in different namespaces on different external IP addresses", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping service namespace collision test; uses createExternalLoadBalancer, a (gce|gke) feature"))
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping service namespace collision test; uses ServiceTypeLoadBalancer, a (gce|gke|aws) feature"))
return
}
@@ -366,11 +787,11 @@ var _ = Describe("Services", func() {
Port: 80,
TargetPort: util.NewIntOrStringFromInt(80),
}},
CreateExternalLoadBalancer: true,
Type: api.ServiceTypeLoadBalancer,
},
}
publicIPs := []string{}
ingressPoints := []string{}
for _, namespace := range namespaces {
for _, serviceName := range serviceNames {
service.ObjectMeta.Name = serviceName
@@ -387,31 +808,55 @@ var _ = Describe("Services", func() {
}
for _, namespace := range namespaces {
for _, serviceName := range serviceNames {
result, err := waitForPublicIPs(c, serviceName, namespace)
result, err := waitForLoadBalancerIngress(c, serviceName, namespace)
Expect(err).NotTo(HaveOccurred())
publicIPs = append(publicIPs, result.Spec.PublicIPs...) // Save 'em to check uniqueness
for i := range result.Status.LoadBalancer.Ingress {
ingress := result.Status.LoadBalancer.Ingress[i].IP
if ingress == "" {
ingress = result.Status.LoadBalancer.Ingress[i].Hostname
}
ingressPoints = append(ingressPoints, ingress) // Save 'em to check uniqueness
}
}
}
validateUniqueOrFail(publicIPs)
validateUniqueOrFail(ingressPoints)
})
})
func waitForPublicIPs(c *client.Client, serviceName, namespace string) (*api.Service, error) {
func waitForLoadBalancerIngress(c *client.Client, serviceName, namespace string) (*api.Service, error) {
const timeout = 4 * time.Minute
var service *api.Service
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have a public IP", timeout, serviceName, namespace))
By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have a LoadBalancer ingress point", timeout, serviceName, namespace))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
service, err := c.Services(namespace).Get(serviceName)
if err != nil {
Logf("Get service failed, ignoring for 5s: %v", err)
continue
}
if len(service.Spec.PublicIPs) > 0 {
if len(service.Status.LoadBalancer.Ingress) > 0 {
return service, nil
}
Logf("Waiting for service %s in namespace %s to have a public IP (%v)", serviceName, namespace, time.Since(start))
Logf("Waiting for service %s in namespace %s to have a LoadBalancer ingress point (%v)", serviceName, namespace, time.Since(start))
}
return service, fmt.Errorf("service %s in namespace %s doesn't have a public IP after %.2f seconds", serviceName, namespace, timeout.Seconds())
return service, fmt.Errorf("service %s in namespace %s doesn't have a LoadBalancer ingress point after %.2f seconds", serviceName, namespace, timeout.Seconds())
}
// waitForLoadBalancerDestroy polls the named service until its
// Status.LoadBalancer reports no ingress points (i.e. the load balancer has
// been torn down), or the timeout expires. On timeout it returns the last
// successfully fetched service (possibly nil if every Get failed) and an error.
func waitForLoadBalancerDestroy(c *client.Client, serviceName, namespace string) (*api.Service, error) {
	const timeout = 4 * time.Minute
	var service *api.Service
	By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have no LoadBalancer ingress points", timeout, serviceName, namespace))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		// Assign with '=' so the outer 'service' is updated; the original
		// ':=' shadowed it, so the timeout path always returned nil.
		s, err := c.Services(namespace).Get(serviceName)
		if err != nil {
			Logf("Get service failed, ignoring for 5s: %v", err)
			continue
		}
		service = s
		if len(service.Status.LoadBalancer.Ingress) == 0 {
			return service, nil
		}
		Logf("Waiting for service %s in namespace %s to have no LoadBalancer ingress points (%v)", serviceName, namespace, time.Since(start))
	}
	return service, fmt.Errorf("service %s in namespace %s still has LoadBalancer ingress points after %.2f seconds", serviceName, namespace, timeout.Seconds())
}
func validateUniqueOrFail(s []string) {
@@ -524,3 +969,267 @@ func addEndpointPodOrFail(c *client.Client, ns, name string, labels map[string]s
_, err := c.Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
}
// collectAddresses returns every node address of the given type, in node
// order. The result is empty (but non-nil) when no node has such an address.
func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
	addresses := []string{}
	for _, node := range nodes.Items {
		for _, addr := range node.Status.Addresses {
			if addr.Type == addressType {
				addresses = append(addresses, addr.Address)
			}
		}
	}
	return addresses
}
// getMinionPublicIps lists all nodes and returns their external IPs,
// falling back to legacy host IPs when no node reports an external address.
func getMinionPublicIps(c *client.Client) ([]string, error) {
	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}
	if external := collectAddresses(nodes, api.NodeExternalIP); len(external) != 0 {
		return external, nil
	}
	return collectAddresses(nodes, api.NodeLegacyHostIP), nil
}
// pickMinionIP returns the public IP of an arbitrary (first-listed) node,
// failing the test if no node has a public IP.
func pickMinionIP(c *client.Client) string {
	ips, err := getMinionPublicIps(c)
	Expect(err).NotTo(HaveOccurred())
	if len(ips) == 0 {
		Failf("got unexpected number (%d) of public IPs", len(ips))
	}
	return ips[0]
}
// testLoadBalancerReachable asserts that the load balancer ingress point
// (its IP, or its hostname when no IP is set) serves the test webserver.
func testLoadBalancerReachable(ingress api.LoadBalancerIngress, port int) {
	target := ingress.IP
	if target == "" {
		target = ingress.Hostname
	}
	testReachable(target, port)
}
// testLoadBalancerNotReachable asserts that the load balancer ingress point
// (its IP, or its hostname when no IP is set) no longer serves traffic.
func testLoadBalancerNotReachable(ingress api.LoadBalancerIngress, port int) {
	target := ingress.IP
	if target == "" {
		target = ingress.Hostname
	}
	testNotReachable(target, port)
}
// testReachable polls http://ip:port (with a fresh TCP connection each try,
// see httpGetNoConnectionPool) until it returns the expected test-webserver
// payload, failing the test if it never succeeds within podStartTimeout.
func testReachable(ip string, port int) {
	url := fmt.Sprintf("http://%s:%d", ip, port)
	// Guard against unresolved inputs. The original calls passed 'url' as an
	// extra Failf argument with no format verb, which go vet flags (%!(EXTRA)).
	if ip == "" {
		Failf("got empty IP for reachability check (%s)", url)
	}
	if port == 0 {
		Failf("got port==0 for reachability check (%s)", url)
	}
	By(fmt.Sprintf("Checking reachability of %s", url))
	var err error
	var resp *http.Response
	for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
		resp, err = httpGetNoConnectionPool(url)
		if err == nil {
			break
		}
		By(fmt.Sprintf("Got error waiting for reachability of %s: %v", url, err))
	}
	Expect(err).NotTo(HaveOccurred())
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	Expect(err).NotTo(HaveOccurred())
	if resp.StatusCode != 200 {
		Failf("received non-success return status %q trying to access %s; got body: %s", resp.Status, url, string(body))
	}
	if !strings.Contains(string(body), "test-webserver") {
		Failf("received response body without expected substring 'test-webserver': %s", string(body))
	}
}
// testNotReachable polls http://ip:port until a request fails, failing the
// test if the endpoint is still serving successfully after podStartTimeout
// (i.e. it asserts the port/LB has actually been closed).
func testNotReachable(ip string, port int) {
	url := fmt.Sprintf("http://%s:%d", ip, port)
	// Guard against unresolved inputs. The original calls passed 'url' as an
	// extra Failf argument with no format verb, which go vet flags (%!(EXTRA)).
	if ip == "" {
		Failf("got empty IP for non-reachability check (%s)", url)
	}
	if port == 0 {
		Failf("got port==0 for non-reachability check (%s)", url)
	}
	var err error
	var resp *http.Response
	var body []byte
	for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
		resp, err = httpGetNoConnectionPool(url)
		if err != nil {
			break
		}
		body, err = ioutil.ReadAll(resp.Body)
		Expect(err).NotTo(HaveOccurred())
		resp.Body.Close()
		By(fmt.Sprintf("Got success waiting for non-reachability of %s: %v", url, resp.Status))
	}
	if err == nil {
		Failf("able to reach service %s when should no longer have been reachable: %q body=%s", url, resp.Status, string(body))
	}
	// TODO: Check type of error
	By(fmt.Sprintf("Found (expected) error during not-reachability test %v", err))
}
// httpGetNoConnectionPool does an HTTP GET, but does not reuse TCP connections.
// This masks problems where the iptables rule has changed, but we don't see it.
// A client-level timeout is set so that a silently-dropping endpoint (e.g. a
// firewalled NodePort) fails the request instead of hanging the test forever;
// callers' retry loops only re-check their deadline between requests.
func httpGetNoConnectionPool(url string) (*http.Response, error) {
	client := &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: true,
		},
		Timeout: 2 * time.Minute,
	}
	return client.Get(url)
}
// Simple helper class to avoid too much boilerplate in tests.
// It records every pod and service it creates so Cleanup can delete them.
type WebserverTest struct {
	ServiceName string            // name of the service under test
	Namespace   string            // namespace all objects are created in
	Client      *client.Client    // API client used for all create/delete calls
	TestId      string            // unique per-test id; used as the selector label value
	Labels      map[string]string // labels applied to pods and used as the service selector
	pods        map[string]bool   // set of pod names created via CreatePod (for Cleanup)
	services    map[string]bool   // set of service names created via CreateService (for Cleanup)
	// Used for generating e.g. unique pod names
	sequence int32
}
// NewWebserverTest returns a WebserverTest for the given namespace/service,
// with a unique test id and matching selector labels, ready to track
// created pods and services for cleanup.
func NewWebserverTest(client *client.Client, namespace string, serviceName string) *WebserverTest {
	testID := serviceName + "-" + string(util.NewUUID())
	return &WebserverTest{
		Client:      client,
		Namespace:   namespace,
		ServiceName: serviceName,
		TestId:      testID,
		Labels:      map[string]string{"testid": testID},
		pods:        make(map[string]bool),
		services:    make(map[string]bool),
	}
}
// SequenceNext atomically increments and returns the per-test sequence
// counter, used to build unique pod names.
func (t *WebserverTest) SequenceNext() int {
	return int(atomic.AddInt32(&t.sequence, 1))
}
// BuildServiceSpec returns the default service config for this test
// (port 80 -> targetPort 80, selecting the test's labels), which callers
// can then modify (e.g. set Spec.Type) before creating it.
func (t *WebserverTest) BuildServiceSpec() *api.Service {
	return &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: t.ServiceName,
		},
		Spec: api.ServiceSpec{
			Selector: t.Labels,
			Ports: []api.ServicePort{{
				Port:       80,
				TargetPort: util.NewIntOrStringFromInt(80),
			}},
		},
	}
}
// CreateWebserverPod creates a pod running the well-known test-webserver
// image, labeled to match this test's service selector, records it for
// cleanup, and waits until it is running.
func (t *WebserverTest) CreateWebserverPod() {
	podName := t.ServiceName + "-" + strconv.Itoa(t.SequenceNext())
	pod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name:   podName,
			Labels: t.Labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name:  "webserver",
				Image: "gcr.io/google_containers/test-webserver",
			}},
		},
	}
	if _, err := t.CreatePod(pod); err != nil {
		Failf("Failed to create pod %s: %v", pod.Name, err)
	}
	expectNoError(waitForPodRunningInNamespace(t.Client, pod.Name, t.Namespace))
}
// CreatePod creates the given pod in the test namespace and, on success,
// records its name so Cleanup will delete it.
func (t *WebserverTest) CreatePod(pod *api.Pod) (*api.Pod, error) {
	created, err := t.Client.Pods(t.Namespace).Create(pod)
	if err == nil {
		t.pods[pod.Name] = true
	}
	return created, err
}
// CreateService creates the given service in the test namespace and, on
// success, records its name so Cleanup will delete it.
func (t *WebserverTest) CreateService(service *api.Service) (*api.Service, error) {
	created, err := t.Client.Services(t.Namespace).Create(service)
	if err == nil {
		t.services[service.Name] = true
	}
	return created, err
}
// DeleteService deletes the named service and, on success, removes it from
// the cleanup list so Cleanup won't try to delete it again.
func (t *WebserverTest) DeleteService(serviceName string) error {
	if err := t.Client.Services(t.Namespace).Delete(serviceName); err != nil {
		return err
	}
	delete(t.services, serviceName)
	return nil
}
// Cleanup deletes every pod and service this test recorded, collecting (not
// short-circuiting on) errors so all objects get a deletion attempt.
func (t *WebserverTest) Cleanup() []error {
	var errs []error
	podClient := t.Client.Pods(t.Namespace) // invariant across the loop
	for podName := range t.pods {
		By("deleting pod " + podName + " in namespace " + t.Namespace)
		if err := podClient.Delete(podName, nil); err != nil {
			errs = append(errs, err)
		}
	}
	for serviceName := range t.services {
		By("deleting service " + serviceName + " in namespace " + t.Namespace)
		if err := t.Client.Services(t.Namespace).Delete(serviceName); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}