Merge branch 'master' of github.com:GoogleCloudPlatform/kubernetes into add-charms

This commit is contained in:
Matt Bruzek
2015-04-22 15:07:49 -05:00
96 changed files with 3261 additions and 464 deletions

View File

@@ -4742,21 +4742,40 @@
} }
] ]
}, },
{
"path": "/api/v1beta1/pods/{name}/exec",
"description": "API at /api/v1beta1 version v1beta1",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{ {
"path": "/api/v1beta1/pods/{name}/log", "path": "/api/v1beta1/pods/{name}/log",
"description": "API at /api/v1beta1 version v1beta1", "description": "API at /api/v1beta1 version v1beta1",
"operations": [ "operations": [
{ {
"type": "v1beta1.PodLogOptions", "type": "v1beta1.Pod",
"method": "GET", "method": "GET",
"summary": "read the specified PodLogOptions", "summary": "read the specified Pod",
"nickname": "readPodLogOptions", "nickname": "readPod",
"parameters": [ "parameters": [
{ {
"type": "string", "type": "string",
"paramType": "path", "paramType": "path",
"name": "name", "name": "name",
"description": "name of the PodLogOptions", "description": "name of the Pod",
"required": true, "required": true,
"allowMultiple": false "allowMultiple": false
}, },
@@ -4773,7 +4792,7 @@
{ {
"code": 200, "code": 200,
"message": "OK", "message": "OK",
"responseModel": "v1beta1.PodLogOptions" "responseModel": "v1beta1.Pod"
} }
], ],
"produces": [ "produces": [
@@ -4785,6 +4804,193 @@
} }
] ]
}, },
{
"path": "/api/v1beta1/pods/{name}/portforward",
"description": "API at /api/v1beta1 version v1beta1",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{
"path": "/api/v1beta1/pods/{name}/proxy",
"description": "API at /api/v1beta1 version v1beta1",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "POST",
"summary": "connect POST requests to Pod",
"nickname": "connectPOSTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "PUT",
"summary": "connect PUT requests to Pod",
"nickname": "connectPUTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "DELETE",
"summary": "connect DELETE requests to Pod",
"nickname": "connectDELETEPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "HEAD",
"summary": "connect HEAD requests to Pod",
"nickname": "connectHEADPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "OPTIONS",
"summary": "connect OPTIONS requests to Pod",
"nickname": "connectOPTIONSPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{
"path": "/api/v1beta1/pods/{name}/proxy/{path:*}",
"description": "API at /api/v1beta1 version v1beta1",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "POST",
"summary": "connect POST requests to Pod",
"nickname": "connectPOSTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "PUT",
"summary": "connect PUT requests to Pod",
"nickname": "connectPUTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "DELETE",
"summary": "connect DELETE requests to Pod",
"nickname": "connectDELETEPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "HEAD",
"summary": "connect HEAD requests to Pod",
"nickname": "connectHEADPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "OPTIONS",
"summary": "connect OPTIONS requests to Pod",
"nickname": "connectOPTIONSPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{ {
"path": "/api/v1beta1/pods/{name}/status", "path": "/api/v1beta1/pods/{name}/status",
"description": "API at /api/v1beta1 version v1beta1", "description": "API at /api/v1beta1 version v1beta1",
@@ -6914,6 +7120,31 @@
"id": "", "id": "",
"properties": null "properties": null
}, },
"v1beta1.AWSElasticBlockStoreVolumeSource": {
"id": "v1beta1.AWSElasticBlockStoreVolumeSource",
"required": [
"volumeID"
],
"properties": {
"fsType": {
"type": "string",
"description": "file system type to mount, such as ext4, xfs, ntfs"
},
"partition": {
"type": "integer",
"format": "int32",
"description": "partition on the disk to mount (e.g., '1' for /dev/sda1); if omitted the plain device name (e.g., /dev/sda) will be mounted"
},
"readOnly": {
"type": "boolean",
"description": "read-only if true, read-write otherwise (false or unspecified)"
},
"volumeID": {
"type": "string",
"description": "unique id of the PD resource in AWS"
}
}
},
"v1beta1.AccessModeType": { "v1beta1.AccessModeType": {
"id": "v1beta1.AccessModeType", "id": "v1beta1.AccessModeType",
"properties": {} "properties": {}
@@ -8668,6 +8899,19 @@
} }
} }
}, },
"v1beta1.PersistentVolumeClaimVolumeSource": {
"id": "v1beta1.PersistentVolumeClaimVolumeSource",
"properties": {
"claimName": {
"type": "string",
"description": "the name of the claim in the same namespace to be mounted as a volume"
},
"readOnly": {
"type": "boolean",
"description": "mount volume as read-only when true; default false"
}
}
},
"v1beta1.PersistentVolumeList": { "v1beta1.PersistentVolumeList": {
"id": "v1beta1.PersistentVolumeList", "id": "v1beta1.PersistentVolumeList",
"properties": { "properties": {
@@ -8727,9 +8971,10 @@
"v1beta1.PersistentVolumeSpec": { "v1beta1.PersistentVolumeSpec": {
"id": "v1beta1.PersistentVolumeSpec", "id": "v1beta1.PersistentVolumeSpec",
"required": [ "required": [
"persistentDisk",
"hostPath", "hostPath",
"glusterfs" "glusterfs",
"persistentDisk",
"awsElasticBlockStore"
], ],
"properties": { "properties": {
"accessModes": { "accessModes": {
@@ -8739,6 +8984,10 @@
}, },
"description": "all ways the volume can be mounted" "description": "all ways the volume can be mounted"
}, },
"awsElasticBlockStore": {
"$ref": "v1beta1.AWSElasticBlockStoreVolumeSource",
"description": "AWS disk resource provisioned by an admin"
},
"capacity": { "capacity": {
"type": "any", "type": "any",
"description": "a description of the persistent volume's resources and capacity" "description": "a description of the persistent volume's resources and capacity"
@@ -8911,63 +9160,6 @@
} }
} }
}, },
"v1beta1.PodLogOptions": {
"id": "v1beta1.PodLogOptions",
"properties": {
"annotations": {
"type": "any",
"description": "map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about the object"
},
"apiVersion": {
"type": "string",
"description": "version of the schema the object should have"
},
"container": {
"type": "string",
"description": "the container for which to stream logs; defaults to only container if there is one container in the pod"
},
"creationTimestamp": {
"type": "string",
"description": "RFC 3339 date and time at which the object was created; populated by the system, read-only; null for lists"
},
"deletionTimestamp": {
"type": "string",
"description": "RFC 3339 date and time at which the object will be deleted; populated by the system when a graceful deletion is requested, read-only; if not set, graceful deletion of the object has not been requested"
},
"follow": {
"type": "boolean",
"description": "follow the log stream of the pod; defaults to false"
},
"generateName": {
"type": "string",
"description": "an optional prefix to use to generate a unique name; has the same validation rules as name; optional, and is applied only name if is not specified"
},
"id": {
"type": "string",
"description": "name of the object; must be a DNS_SUBDOMAIN and unique among all objects of the same kind within the same namespace; used in resource URLs; cannot be updated"
},
"kind": {
"type": "string",
"description": "kind of object, in CamelCase; cannot be updated"
},
"namespace": {
"type": "string",
"description": "namespace to which the object belongs; must be a DNS_SUBDOMAIN; 'default' by default; cannot be updated"
},
"resourceVersion": {
"$ref": "uint64",
"description": "string that identifies the internal version of this object that can be used by clients to determine when objects have changed; populated by the system, read-only; value must be treated as opaque by clients and passed unmodified back to the server: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#concurrency-control-and-consistency"
},
"selfLink": {
"type": "string",
"description": "URL for the object; populated by the system, read-only"
},
"uid": {
"type": "string",
"description": "unique UUID across space and time; populated by the system, read-only; cannot be updated"
}
}
},
"v1beta1.PodState": { "v1beta1.PodState": {
"id": "v1beta1.PodState", "id": "v1beta1.PodState",
"properties": { "properties": {
@@ -9812,7 +10004,7 @@
}, },
"source": { "source": {
"$ref": "v1beta1.VolumeSource", "$ref": "v1beta1.VolumeSource",
"description": "location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, or GitRepo; default is EmptyDir" "description": "location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, AWSElasticBlockStore, or GitRepo; default is EmptyDir"
} }
} }
}, },
@@ -9851,6 +10043,7 @@
"hostDir", "hostDir",
"emptyDir", "emptyDir",
"persistentDisk", "persistentDisk",
"awsElasticBlockStore",
"gitRepo", "gitRepo",
"secret", "secret",
"nfs", "nfs",
@@ -9858,6 +10051,10 @@
"glusterfs" "glusterfs"
], ],
"properties": { "properties": {
"awsElasticBlockStore": {
"$ref": "v1beta1.AWSElasticBlockStoreVolumeSource",
"description": "AWS disk resource attached to the host machine on demand"
},
"emptyDir": { "emptyDir": {
"$ref": "v1beta1.EmptyDirVolumeSource", "$ref": "v1beta1.EmptyDirVolumeSource",
"description": "temporary directory that shares a pod's lifetime" "description": "temporary directory that shares a pod's lifetime"
@@ -9886,6 +10083,10 @@
"$ref": "v1beta1.GCEPersistentDiskVolumeSource", "$ref": "v1beta1.GCEPersistentDiskVolumeSource",
"description": "GCE disk resource attached to the host machine on demand" "description": "GCE disk resource attached to the host machine on demand"
}, },
"persistentVolumeClaim": {
"$ref": "v1beta1.PersistentVolumeClaimVolumeSource",
"description": "a reference to a PersistentVolumeClaim in the same namespace"
},
"secret": { "secret": {
"$ref": "v1beta1.SecretVolumeSource", "$ref": "v1beta1.SecretVolumeSource",
"description": "secret to populate volume with" "description": "secret to populate volume with"

View File

@@ -4742,21 +4742,40 @@
} }
] ]
}, },
{
"path": "/api/v1beta2/pods/{name}/exec",
"description": "API at /api/v1beta2 version v1beta2",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{ {
"path": "/api/v1beta2/pods/{name}/log", "path": "/api/v1beta2/pods/{name}/log",
"description": "API at /api/v1beta2 version v1beta2", "description": "API at /api/v1beta2 version v1beta2",
"operations": [ "operations": [
{ {
"type": "v1beta2.PodLogOptions", "type": "v1beta2.Pod",
"method": "GET", "method": "GET",
"summary": "read the specified PodLogOptions", "summary": "read the specified Pod",
"nickname": "readPodLogOptions", "nickname": "readPod",
"parameters": [ "parameters": [
{ {
"type": "string", "type": "string",
"paramType": "path", "paramType": "path",
"name": "name", "name": "name",
"description": "name of the PodLogOptions", "description": "name of the Pod",
"required": true, "required": true,
"allowMultiple": false "allowMultiple": false
}, },
@@ -4773,7 +4792,7 @@
{ {
"code": 200, "code": 200,
"message": "OK", "message": "OK",
"responseModel": "v1beta2.PodLogOptions" "responseModel": "v1beta2.Pod"
} }
], ],
"produces": [ "produces": [
@@ -4785,6 +4804,193 @@
} }
] ]
}, },
{
"path": "/api/v1beta2/pods/{name}/portforward",
"description": "API at /api/v1beta2 version v1beta2",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{
"path": "/api/v1beta2/pods/{name}/proxy",
"description": "API at /api/v1beta2 version v1beta2",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "POST",
"summary": "connect POST requests to Pod",
"nickname": "connectPOSTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "PUT",
"summary": "connect PUT requests to Pod",
"nickname": "connectPUTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "DELETE",
"summary": "connect DELETE requests to Pod",
"nickname": "connectDELETEPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "HEAD",
"summary": "connect HEAD requests to Pod",
"nickname": "connectHEADPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "OPTIONS",
"summary": "connect OPTIONS requests to Pod",
"nickname": "connectOPTIONSPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{
"path": "/api/v1beta2/pods/{name}/proxy/{path:*}",
"description": "API at /api/v1beta2 version v1beta2",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to Pod",
"nickname": "connectGETPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "POST",
"summary": "connect POST requests to Pod",
"nickname": "connectPOSTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "PUT",
"summary": "connect PUT requests to Pod",
"nickname": "connectPUTPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "DELETE",
"summary": "connect DELETE requests to Pod",
"nickname": "connectDELETEPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "HEAD",
"summary": "connect HEAD requests to Pod",
"nickname": "connectHEADPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "OPTIONS",
"summary": "connect OPTIONS requests to Pod",
"nickname": "connectOPTIONSPod",
"parameters": [],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
{ {
"path": "/api/v1beta2/pods/{name}/status", "path": "/api/v1beta2/pods/{name}/status",
"description": "API at /api/v1beta2 version v1beta2", "description": "API at /api/v1beta2 version v1beta2",
@@ -6914,6 +7120,31 @@
"id": "", "id": "",
"properties": null "properties": null
}, },
"v1beta2.AWSElasticBlockStoreVolumeSource": {
"id": "v1beta2.AWSElasticBlockStoreVolumeSource",
"required": [
"volumeID"
],
"properties": {
"fsType": {
"type": "string",
"description": "file system type to mount, such as ext4, xfs, ntfs"
},
"partition": {
"type": "integer",
"format": "int32",
"description": "partition on the disk to mount (e.g., '1' for /dev/sda1); if omitted the plain device name (e.g., /dev/sda) will be mounted"
},
"readOnly": {
"type": "boolean",
"description": "read-only if true, read-write otherwise (false or unspecified)"
},
"volumeID": {
"type": "string",
"description": "unique id of the PD resource in AWS"
}
}
},
"v1beta2.AccessModeType": { "v1beta2.AccessModeType": {
"id": "v1beta2.AccessModeType", "id": "v1beta2.AccessModeType",
"properties": {} "properties": {}
@@ -8657,6 +8888,19 @@
} }
} }
}, },
"v1beta2.PersistentVolumeClaimVolumeSource": {
"id": "v1beta2.PersistentVolumeClaimVolumeSource",
"properties": {
"claimName": {
"type": "string",
"description": "the name of the claim in the same namespace to be mounted as a volume"
},
"readOnly": {
"type": "boolean",
"description": "mount volume as read-only when true; default false"
}
}
},
"v1beta2.PersistentVolumeList": { "v1beta2.PersistentVolumeList": {
"id": "v1beta2.PersistentVolumeList", "id": "v1beta2.PersistentVolumeList",
"properties": { "properties": {
@@ -8717,6 +8961,7 @@
"id": "v1beta2.PersistentVolumeSpec", "id": "v1beta2.PersistentVolumeSpec",
"required": [ "required": [
"persistentDisk", "persistentDisk",
"awsElasticBlockStore",
"hostPath", "hostPath",
"glusterfs" "glusterfs"
], ],
@@ -8728,6 +8973,10 @@
}, },
"description": "all ways the volume can be mounted" "description": "all ways the volume can be mounted"
}, },
"awsElasticBlockStore": {
"$ref": "v1beta2.AWSElasticBlockStoreVolumeSource",
"description": "AWS disk resource provisioned by an admin"
},
"capacity": { "capacity": {
"type": "any", "type": "any",
"description": "a description of the persistent volume's resources and capacity" "description": "a description of the persistent volume's resources and capacity"
@@ -8900,63 +9149,6 @@
} }
} }
}, },
"v1beta2.PodLogOptions": {
"id": "v1beta2.PodLogOptions",
"properties": {
"annotations": {
"type": "any",
"description": "map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about the object"
},
"apiVersion": {
"type": "string",
"description": "version of the schema the object should have"
},
"container": {
"type": "string",
"description": "the container for which to stream logs; defaults to only container if there is one container in the pod"
},
"creationTimestamp": {
"type": "string",
"description": "RFC 3339 date and time at which the object was created; populated by the system, read-only; null for lists"
},
"deletionTimestamp": {
"type": "string",
"description": "RFC 3339 date and time at which the object will be deleted; populated by the system when a graceful deletion is requested, read-only; if not set, graceful deletion of the object has not been requested"
},
"follow": {
"type": "boolean",
"description": "follow the log stream of the pod; defaults to false"
},
"generateName": {
"type": "string",
"description": "an optional prefix to use to generate a unique name; has the same validation rules as name; optional, and is applied only name if is not specified"
},
"id": {
"type": "string",
"description": "name of the object; must be a DNS_SUBDOMAIN and unique among all objects of the same kind within the same namespace; used in resource URLs; cannot be updated"
},
"kind": {
"type": "string",
"description": "kind of object, in CamelCase; cannot be updated"
},
"namespace": {
"type": "string",
"description": "namespace to which the object belongs; must be a DNS_SUBDOMAIN; 'default' by default; cannot be updated"
},
"resourceVersion": {
"$ref": "uint64",
"description": "string that identifies the internal version of this object that can be used by clients to determine when objects have changed; populated by the system, read-only; value must be treated as opaque by clients and passed unmodified back to the server: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#concurrency-control-and-consistency"
},
"selfLink": {
"type": "string",
"description": "URL for the object; populated by the system, read-only"
},
"uid": {
"type": "string",
"description": "unique UUID across space and time; populated by the system, read-only"
}
}
},
"v1beta2.PodState": { "v1beta2.PodState": {
"id": "v1beta2.PodState", "id": "v1beta2.PodState",
"properties": { "properties": {
@@ -9801,7 +9993,7 @@
}, },
"source": { "source": {
"$ref": "v1beta2.VolumeSource", "$ref": "v1beta2.VolumeSource",
"description": "location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, or GitRepo; default is EmptyDir" "description": "location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, AWSElasticBlockStore, or GitRepo; default is EmptyDir"
} }
} }
}, },
@@ -9832,6 +10024,7 @@
"hostDir", "hostDir",
"emptyDir", "emptyDir",
"persistentDisk", "persistentDisk",
"awsElasticBlockStore",
"gitRepo", "gitRepo",
"secret", "secret",
"nfs", "nfs",
@@ -9839,6 +10032,10 @@
"glusterfs" "glusterfs"
], ],
"properties": { "properties": {
"awsElasticBlockStore": {
"$ref": "v1beta2.AWSElasticBlockStoreVolumeSource",
"description": "AWS disk resource attached to the host machine on demand"
},
"emptyDir": { "emptyDir": {
"$ref": "v1beta2.EmptyDirVolumeSource", "$ref": "v1beta2.EmptyDirVolumeSource",
"description": "temporary directory that shares a pod's lifetime" "description": "temporary directory that shares a pod's lifetime"
@@ -9867,6 +10064,10 @@
"$ref": "v1beta2.GCEPersistentDiskVolumeSource", "$ref": "v1beta2.GCEPersistentDiskVolumeSource",
"description": "GCE disk resource attached to the host machine on demand" "description": "GCE disk resource attached to the host machine on demand"
}, },
"persistentVolumeClaim": {
"$ref": "v1beta2.PersistentVolumeClaimVolumeSource",
"description": "a reference to a PersistentVolumeClaim in the same namespace"
},
"secret": { "secret": {
"$ref": "v1beta2.SecretVolumeSource", "$ref": "v1beta2.SecretVolumeSource",
"description": "secret to populate volume" "description": "secret to populate volume"

File diff suppressed because it is too large.

View File

@@ -29,6 +29,7 @@ import (
kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api" kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client" kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
kclientcmd "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
kfields "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" kfields "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
klabels "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" klabels "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
tools "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" tools "github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
@@ -42,6 +43,7 @@ var (
etcd_mutation_timeout = flag.Duration("etcd_mutation_timeout", 10*time.Second, "crash after retrying etcd mutation for a specified duration") etcd_mutation_timeout = flag.Duration("etcd_mutation_timeout", 10*time.Second, "crash after retrying etcd mutation for a specified duration")
etcd_server = flag.String("etcd-server", "http://127.0.0.1:4001", "URL to etcd server") etcd_server = flag.String("etcd-server", "http://127.0.0.1:4001", "URL to etcd server")
verbose = flag.Bool("verbose", false, "log extra information") verbose = flag.Bool("verbose", false, "log extra information")
kubecfg_file = flag.String("kubecfg_file", "", "Location of kubecfg file for access to kubernetes service")
) )
func removeDNS(record string, etcdClient *etcd.Client) error { func removeDNS(record string, etcdClient *etcd.Client) error {
@@ -128,8 +130,9 @@ func newEtcdClient() (client *etcd.Client) {
// TODO: evaluate using pkg/client/clientcmd // TODO: evaluate using pkg/client/clientcmd
func newKubeClient() (*kclient.Client, error) { func newKubeClient() (*kclient.Client, error) {
config := &kclient.Config{} var config *kclient.Config
if *kubecfg_file == "" {
// No kubecfg file provided. Use kubernetes_ro service.
masterHost := os.Getenv("KUBERNETES_RO_SERVICE_HOST") masterHost := os.Getenv("KUBERNETES_RO_SERVICE_HOST")
if masterHost == "" { if masterHost == "" {
log.Fatalf("KUBERNETES_RO_SERVICE_HOST is not defined") log.Fatalf("KUBERNETES_RO_SERVICE_HOST is not defined")
@@ -138,12 +141,20 @@ func newKubeClient() (*kclient.Client, error) {
if masterPort == "" { if masterPort == "" {
log.Fatalf("KUBERNETES_RO_SERVICE_PORT is not defined") log.Fatalf("KUBERNETES_RO_SERVICE_PORT is not defined")
} }
config.Host = fmt.Sprintf("http://%s:%s", masterHost, masterPort) config = &kclient.Config{
Host: fmt.Sprintf("http://%s:%s", masterHost, masterPort),
Version: "v1beta1",
}
} else {
var err error
if config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&kclientcmd.ClientConfigLoadingRules{ExplicitPath: *kubecfg_file},
&kclientcmd.ConfigOverrides{}).ClientConfig(); err != nil {
return nil, err
}
}
log.Printf("Using %s for kubernetes master", config.Host) log.Printf("Using %s for kubernetes master", config.Host)
config.Version = "v1beta1"
log.Printf("Using kubernetes API %s", config.Version) log.Printf("Using kubernetes API %s", config.Version)
return kclient.New(config) return kclient.New(config)
} }
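In short, kube2sky now builds its client config in one of two ways: from the read-only service environment variables (the old path), or, when -kubecfg_file is given, from a kubeconfig loaded via clientcmd. A sketch of both invocations, with placeholder host, port, and domain values:

```
# Old path: no kubecfg file; talk to the read-only kubernetes service from the env.
KUBERNETES_RO_SERVICE_HOST=10.0.0.1 KUBERNETES_RO_SERVICE_PORT=80 \
  ./kube2sky -domain=cluster.local

# New path: read host, credentials, and TLS settings from a kubeconfig file
# (this path matches the secret mount added to skydns-rc.yaml.in below).
./kube2sky -domain=cluster.local -kubecfg_file=/etc/dns_token/kubeconfig
```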

View File

@@ -29,10 +29,15 @@ desiredState:
"-advertise-client-urls=http://127.0.0.1:4001", "-advertise-client-urls=http://127.0.0.1:4001",
] ]
- name: kube2sky - name: kube2sky
image: gcr.io/google_containers/kube2sky:1.1 image: gcr.io/google_containers/kube2sky:1.2
volumeMounts:
- name: dns-token
mountPath: /etc/dns_token
readOnly: true
command: [ command: [
# entrypoint = "/kube2sky", # entrypoint = "/kube2sky",
"-domain={{ pillar['dns_domain'] }}", "-domain={{ pillar['dns_domain'] }}",
"-kubecfg_file=/etc/dns_token/kubeconfig",
] ]
- name: skydns - name: skydns
image: gcr.io/google_containers/skydns:2015-03-11-001 image: gcr.io/google_containers/skydns:2015-03-11-001
@@ -46,3 +51,11 @@ desiredState:
- name: dns - name: dns
containerPort: 53 containerPort: 53
protocol: UDP protocol: UDP
volumes:
- name: dns-token
source:
secret:
target:
kind: Secret
namespace: default
name: token-system-dns

View File

@@ -52,3 +52,14 @@ known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
mkdir -p /srv/salt-overlay/salt/kubelet mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file) (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${KNOWN_TOKENS_FILE}"
done

View File

@@ -438,21 +438,22 @@ function kube-up {
create-kubeconfig create-kubeconfig
) )
# Wait for salt on the minions
sleep 30
echo "Sanity checking cluster..." echo "Sanity checking cluster..."
echo
echo " This will continually check the minions to ensure docker is"
echo " installed. This is usually a good indicator that salt has"
echo " successfully provisioned. This might loop forever if there was"
echo " some uncaught error during start up."
echo
# Basic sanity checking # Basic sanity checking
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
# Make sure docker is installed # Make sure docker is installed
echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}." echo "--> Making sure docker is installed on ${MINION_NAMES[$i]}."
ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \ until ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p ${ssh_ports[$i]} \
$AZ_CS.cloudapp.net which docker > /dev/null || { $AZ_CS.cloudapp.net which docker > /dev/null 2>&1; do
echo "Docker failed to install on ${MINION_NAMES[$i]}. Your cluster is unlikely" >&2 printf "."
echo "to work correctly. Please run ./cluster/kube-down.sh and re-create the" >&2 sleep 2
echo "cluster. (sorry!)" >&2 done
exit 1
}
done done
echo echo

View File

@@ -25,6 +25,7 @@ source "${KUBE_ROOT}/cluster/common.sh"
NODE_INSTANCE_PREFIX="${INSTANCE_PREFIX}-minion" NODE_INSTANCE_PREFIX="${INSTANCE_PREFIX}-minion"
KUBE_PROMPT_FOR_UPDATE=y KUBE_PROMPT_FOR_UPDATE=y
KUBE_SKIP_UPDATE=${KUBE_SKIP_UPDATE-"n"}
# Verify prereqs # Verify prereqs
function verify-prereqs { function verify-prereqs {
@@ -48,12 +49,20 @@ function verify-prereqs {
fi fi
fi fi
done done
if [[ "${KUBE_SKIP_UPDATE}" == "y" ]]; then
return
fi
# update and install components as needed # update and install components as needed
if [[ "${KUBE_PROMPT_FOR_UPDATE}" != "y" ]]; then if [[ "${KUBE_PROMPT_FOR_UPDATE}" != "y" ]]; then
gcloud_prompt="-q" gcloud_prompt="-q"
fi fi
gcloud ${gcloud_prompt:-} components update preview || true local sudo_prefix=""
gcloud ${gcloud_prompt:-} components update || true if [ ! -w $(dirname `which gcloud`) ]; then
sudo_prefix="sudo"
fi
${sudo_prefix} gcloud ${gcloud_prompt:-} components update preview || true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update alpha || true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update || true
} }
# Create a temp dir that'll be deleted at the end of this bash session. # Create a temp dir that'll be deleted at the end of this bash session.
@@ -967,11 +976,15 @@ function test-teardown {
function ssh-to-node { function ssh-to-node {
local node="$1" local node="$1"
local cmd="$2" local cmd="$2"
# Loop until we can successfully ssh into the box
for try in $(seq 1 5); do for try in $(seq 1 5); do
if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"; then if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test"; then
break break
fi fi
sleep 5
done done
# Then actually try the command.
gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
} }
# Restart the kube-proxy on a node ($1) # Restart the kube-proxy on a node ($1)
@@ -981,7 +994,7 @@ function restart-kube-proxy {
# Restart the kube-apiserver on a node ($1) # Restart the kube-apiserver on a node ($1)
function restart-apiserver { function restart-apiserver {
ssh-to-node "$1" "sudo docker kill `sudo docker ps | grep /kube-apiserver | awk '{print $1}'`" ssh-to-node "$1" "sudo docker kill \`sudo docker ps | grep /kube-apiserver | awk '{print $1}'\`"
} }
# Perform preparations required to run e2e tests # Perform preparations required to run e2e tests

View File

@@ -20,6 +20,7 @@
# config-default.sh. # config-default.sh.
KUBE_PROMPT_FOR_UPDATE=y KUBE_PROMPT_FOR_UPDATE=y
KUBE_SKIP_UPDATE=${KUBE_SKIP_UPDATE-"n"}
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gke/${KUBE_CONFIG_FILE:-config-default.sh}" source "${KUBE_ROOT}/cluster/gke/${KUBE_CONFIG_FILE:-config-default.sh}"
@@ -86,13 +87,20 @@ function verify-prereqs() {
exit 1 exit 1
fi fi
fi fi
if [[ "${KUBE_SKIP_UPDATE}" == "y" ]]; then
return
fi
# update and install components as needed # update and install components as needed
if [[ "${KUBE_PROMPT_FOR_UPDATE}" != "y" ]]; then if [[ "${KUBE_PROMPT_FOR_UPDATE}" != "y" ]]; then
gcloud_prompt="-q" gcloud_prompt="-q"
fi fi
gcloud ${gcloud_prompt:-} components update preview || true local sudo_prefix=""
gcloud ${gcloud_prompt:-} components update alpha || true if [ ! -w $(dirname `which gcloud`) ]; then
gcloud ${gcloud_prompt:-} components update || true sudo_prefix="sudo"
fi
${sudo_prefix} gcloud ${gcloud_prompt:-} components update preview || true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update alpha|| true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update || true
} }
# Instantiate a kubernetes cluster # Instantiate a kubernetes cluster
@@ -238,8 +246,15 @@ function ssh-to-node() {
local node="$1" local node="$1"
local cmd="$2" local cmd="$2"
"${GCLOUD}" compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" \ # Loop until we can successfully ssh into the box
--zone="${ZONE}" "${node}" --command "${cmd}" for try in $(seq 1 5); do
if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test"; then
break
fi
sleep 5
done
# Then actually try the command.
gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
} }
# Restart the kube-proxy on a node ($1) # Restart the kube-proxy on a node ($1)
@@ -251,7 +266,7 @@ function restart-kube-proxy() {
# Restart the kube-proxy on master ($1) # Restart the kube-proxy on master ($1)
function restart-apiserver() { function restart-apiserver() {
echo "... in restart-kube-apiserver()" >&2 echo "... in restart-kube-apiserver()" >&2
ssh-to-node "$1" "sudo docker kill `sudo docker ps | grep /kube-apiserver | awk '{print $1}'`" ssh-to-node "$1" "sudo docker kill \`sudo docker ps | grep /kube-apiserver | awk '{print $1}'\`"
} }
# Execute after running tests to perform any required clean-up. This is called # Execute after running tests to perform any required clean-up. This is called

View File

@@ -145,6 +145,17 @@ if [[ ! -f "${known_tokens_file}" ]]; then
mkdir -p /srv/salt-overlay/salt/kubelet mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth" kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file) (umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${KNOWN_TOKENS_FILE}"
done
fi fi
# Configure nginx authorization # Configure nginx authorization

View File

@@ -59,10 +59,9 @@ while true; do
# Parse the output to capture the value of the second column("HEALTH"), then use grep to # Parse the output to capture the value of the second column("HEALTH"), then use grep to
# count the number of times it doesn't match "success". # count the number of times it doesn't match "success".
# Because of the header, the actual unsuccessful count is 1 minus the count. # Because of the header, the actual unsuccessful count is 1 minus the count.
non_success_count=$(echo "${kubectl_output}" | \ non_success_count=$(echo "${kubectl_output}" | \
sed -n 's/^\([[:alnum:][:punct:]]\+\)\s\+\([[:alnum:][:punct:]]\+\)\s\+.*/\2/p' | \ sed -n 's/^[[:alnum:][:punct:]]/&/p' | \
grep 'Healthy' --invert-match -c) grep --invert-match -c '^[[:alnum:][:punct:]]\{1,\}[[:space:]]\{1,\}Healthy')
if ((non_success_count > 1)); then if ((non_success_count > 1)); then
if ((attempt < 5)); then if ((attempt < 5)); then

View File

@@ -43,6 +43,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller"
replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller" replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
@@ -260,7 +261,7 @@ func podsOnMinions(c *client.Client, podNamespace string, labelSelector labels.S
podInfo := fakeKubeletClient{} podInfo := fakeKubeletClient{}
// wait for minions to indicate they have info about the desired pods // wait for minions to indicate they have info about the desired pods
return func() (bool, error) { return func() (bool, error) {
pods, err := c.Pods(podNamespace).List(labelSelector) pods, err := c.Pods(podNamespace).List(labelSelector, fields.Everything())
if err != nil { if err != nil {
glog.Infof("Unable to get pods to list: %v", err) glog.Infof("Unable to get pods to list: %v", err)
return false, nil return false, nil
@@ -384,7 +385,7 @@ containers:
namespace := kubelet.NamespaceDefault namespace := kubelet.NamespaceDefault
if err := wait.Poll(time.Second, time.Minute*2, if err := wait.Poll(time.Second, time.Minute*2,
podRunning(c, namespace, podName)); err != nil { podRunning(c, namespace, podName)); err != nil {
if pods, err := c.Pods(namespace).List(labels.Everything()); err == nil { if pods, err := c.Pods(namespace).List(labels.Everything(), fields.Everything()); err == nil {
for _, pod := range pods.Items { for _, pod := range pods.Items {
glog.Infof("pod found: %s/%s", namespace, pod.Name) glog.Infof("pod found: %s/%s", namespace, pod.Name)
} }

View File

@@ -35,6 +35,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/servicecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/servicecontroller"
replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller" replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/healthz"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports" "github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports"
"github.com/GoogleCloudPlatform/kubernetes/pkg/namespace" "github.com/GoogleCloudPlatform/kubernetes/pkg/namespace"
"github.com/GoogleCloudPlatform/kubernetes/pkg/resourcequota" "github.com/GoogleCloudPlatform/kubernetes/pkg/resourcequota"
@@ -42,6 +43,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/pflag" "github.com/spf13/pflag"
) )
@@ -179,13 +181,20 @@ func (s *CMServer) Run(_ []string) error {
} }
go func() { go func() {
if s.EnableProfiling {
mux := http.NewServeMux() mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
mux.HandleFunc("/debug/pprof/", pprof.Index) mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile) mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
} }
http.ListenAndServe(net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)), nil) mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}() }()
endpoints := service.NewEndpointController(kubeClient) endpoints := service.NewEndpointController(kubeClient)
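With this change the controller-manager always serves a mux with a /healthz handler and a Prometheus /metrics handler (the pprof endpoints remain gated on s.EnableProfiling), so both can be probed over plain HTTP. A sketch, assuming a conventional port of 10252 — the actual value comes from s.Address and s.Port, not from this diff:

```
# 10252 is an assumption for the controller-manager's serving port; substitute
# whatever address/port the binary was configured with (s.Address / s.Port).
curl http://127.0.0.1:10252/healthz
curl http://127.0.0.1:10252/metrics
```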

View File

@@ -251,6 +251,7 @@ _kubectl_get()
must_have_one_noun+=("persistentvolume") must_have_one_noun+=("persistentvolume")
must_have_one_noun+=("persistentvolumeclaim") must_have_one_noun+=("persistentvolumeclaim")
must_have_one_noun+=("pod") must_have_one_noun+=("pod")
must_have_one_noun+=("podtemplate")
must_have_one_noun+=("replicationcontroller") must_have_one_noun+=("replicationcontroller")
must_have_one_noun+=("resourcequota") must_have_one_noun+=("resourcequota")
must_have_one_noun+=("secret") must_have_one_noun+=("secret")

View File

@@ -17,7 +17,7 @@ The **Kubelet** manages [pods](../pods.md) and their containers, their images, t
### Kube-Proxy ### Kube-Proxy
Each node also runs a simple network proxy and load balancer (see the [services FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/Services-FAQ) for more details). This reflects `services` (see [the services doc](../docs/services.md) for more details) as defined in the Kubernetes API on each node and can do simple TCP and UDP stream forwarding (round robin) across a set of backends. Each node also runs a simple network proxy and load balancer (see the [services FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/Services-FAQ) for more details). This reflects `services` (see [the services doc](../services.md) for more details) as defined in the Kubernetes API on each node and can do simple TCP and UDP stream forwarding (round robin) across a set of backends.
Service endpoints are currently found via [DNS](../dns.md) or through environment variables (both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) and Kubernetes {FOO}_SERVICE_HOST and {FOO}_SERVICE_PORT variables are supported). These variables resolve to ports managed by the service proxy. Service endpoints are currently found via [DNS](../dns.md) or through environment variables (both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) and Kubernetes {FOO}_SERVICE_HOST and {FOO}_SERVICE_PORT variables are supported). These variables resolve to ports managed by the service proxy.

View File

@@ -37,6 +37,7 @@ coreos:
Environment=ETCD_INITIAL_ADVERTISE_PEER_URLS=http://%host%:2380 Environment=ETCD_INITIAL_ADVERTISE_PEER_URLS=http://%host%:2380
Environment=ETCD_LISTEN_PEER_URLS=http://%host%:2380 Environment=ETCD_LISTEN_PEER_URLS=http://%host%:2380
Environment=ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001 Environment=ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001
Environment=ETCD_ADVERTISE_CLIENT_URLS=http://%host%:2379,http://%host%:4001
Environment=ETCD_INITIAL_CLUSTER=%cluster% Environment=ETCD_INITIAL_CLUSTER=%cluster%
Environment=ETCD_INITIAL_CLUSTER_STATE=new Environment=ETCD_INITIAL_CLUSTER_STATE=new
ExecStart=/opt/bin/etcd2 ExecStart=/opt/bin/etcd2

View File

@@ -129,7 +129,7 @@ coreos:
ExecStartPre=/usr/bin/curl \ ExecStartPre=/usr/bin/curl \
--silent \ --silent \
--location \ --location \
https://github.com/zettio/weave/releases/download/latest_release/weave \ https://github.com/weaveworks/weave/releases/download/v0.9.0/weave \
--output /opt/bin/weave --output /opt/bin/weave
ExecStartPre=/usr/bin/curl \ ExecStartPre=/usr/bin/curl \
--silent \ --silent \

View File

@@ -77,6 +77,11 @@ KUBE_SERVICE_ADDRESSES="--portal_net=10.254.0.0/16"
KUBE_API_ARGS="" KUBE_API_ARGS=""
``` ```
* Edit /etc/etcd/etcd.conf so that etcd listens on all IPs instead of only 127.0.0.1; otherwise you will get errors like "connection refused":
```
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001"
```
* Start the appropriate services on master: * Start the appropriate services on master:
``` ```

View File

@@ -34,7 +34,6 @@ import (
) )
func validateObject(obj runtime.Object) (errors []error) { func validateObject(obj runtime.Object) (errors []error) {
ctx := api.NewDefaultContext()
switch t := obj.(type) { switch t := obj.(type) {
case *api.ReplicationController: case *api.ReplicationController:
if t.Namespace == "" { if t.Namespace == "" {
@@ -49,7 +48,6 @@ func validateObject(obj runtime.Object) (errors []error) {
if t.Namespace == "" { if t.Namespace == "" {
t.Namespace = api.NamespaceDefault t.Namespace = api.NamespaceDefault
} }
api.ValidNamespace(ctx, &t.ObjectMeta)
errors = validation.ValidateService(t) errors = validation.ValidateService(t)
case *api.ServiceList: case *api.ServiceList:
for i := range t.Items { for i := range t.Items {
@@ -59,7 +57,6 @@ func validateObject(obj runtime.Object) (errors []error) {
if t.Namespace == "" { if t.Namespace == "" {
t.Namespace = api.NamespaceDefault t.Namespace = api.NamespaceDefault
} }
api.ValidNamespace(ctx, &t.ObjectMeta)
errors = validation.ValidatePod(t) errors = validation.ValidatePod(t)
case *api.PodList: case *api.PodList:
for i := range t.Items { for i := range t.Items {
@@ -68,8 +65,15 @@ func validateObject(obj runtime.Object) (errors []error) {
case *api.PersistentVolume: case *api.PersistentVolume:
errors = validation.ValidatePersistentVolume(t) errors = validation.ValidatePersistentVolume(t)
case *api.PersistentVolumeClaim: case *api.PersistentVolumeClaim:
api.ValidNamespace(ctx, &t.ObjectMeta) if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidatePersistentVolumeClaim(t) errors = validation.ValidatePersistentVolumeClaim(t)
case *api.PodTemplate:
if t.Namespace == "" {
t.Namespace = api.NamespaceDefault
}
errors = validation.ValidatePodTemplate(t)
default: default:
return []error{fmt.Errorf("no validation defined for %#v", obj)} return []error{fmt.Errorf("no validation defined for %#v", obj)}
} }
@@ -156,6 +160,7 @@ func TestExampleObjectSchemas(t *testing.T) {
"pod-with-http-healthcheck": &api.Pod{}, "pod-with-http-healthcheck": &api.Pod{},
"service": &api.Service{}, "service": &api.Service{},
"replication-controller": &api.ReplicationController{}, "replication-controller": &api.ReplicationController{},
"podtemplate": &api.PodTemplate{},
}, },
"../examples/update-demo": { "../examples/update-demo": {
"kitten-rc": &api.ReplicationController{}, "kitten-rc": &api.ReplicationController{},

examples/meteor/README.md Normal file
View File

@@ -0,0 +1,72 @@
Build a container for your Meteor app
-------------------------------------
To run your Meteor app on Kubernetes you first need to build a container for it. To do that, install [Docker](https://www.docker.com) and get an account on [Docker Hub](https://hub.docker.com/). Once you have those, add two files to your Meteor project: "Dockerfile" and ".dockerignore".
"Dockerfile" should contain this:
FROM chees/meteor-kubernetes
ENV ROOT_URL http://myawesomeapp.com
You should replace the ROOT_URL with the actual hostname of your app.
The .dockerignore file should contain this:
.meteor/local
packages/*/.build*
This tells Docker to ignore the files in those directories when building your container.
You can see an example of a Dockerfile in our [meteor-gke-example](https://github.com/Q42/meteor-gke-example) project.
Now you can build your container by running something like this in your Meteor project directory:
docker build -t chees/meteor-gke-example:1 .
Here you should replace "chees" with your own username on Docker Hub, "meteor-gke-example" with the name of your project and "1" with the version name of your build.
Push the container to your Docker Hub account (again replacing the username and project with your own):
docker push chees/meteor-gke-example
Running
-------
Now that you have containerized your Meteor app it's time to set up your cluster. Edit "meteor-controller.json" and make sure the "image" points to the container you just pushed to the Docker Hub.
For Mongo we use a Persistent Disk to store the data. If you're using gcloud you can create it once by running:
gcloud compute disks create --size=200GB mongo-disk
You also need to format the disk before you can use it:
gcloud compute instances attach-disk --disk=mongo-disk --device-name temp-data k8s-meteor-master
gcloud compute ssh k8s-meteor-master --command "sudo mkdir /mnt/tmp && sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-temp-data /mnt/tmp"
gcloud compute instances detach-disk --disk mongo-disk k8s-meteor-master
Now you can start Mongo using that disk:
kubectl create -f mongo-pod.json
kubectl create -f mongo-service.json
Wait until Mongo has started completely, then set up Meteor:
kubectl create -f meteor-controller.json
kubectl create -f meteor-service.json
Note that meteor-service.json creates an external load balancer, so your app should be available through the IP of that load balancer once the Meteor pods are started. On gcloud you can find the IP of your load balancer by running:
gcloud compute forwarding-rules list k8s-meteor-default-meteor | grep k8s-meteor-default-meteor | awk '{print $3}'
You might have to open up port 80 if it's not open yet in your project. For example:
gcloud compute firewall-rules create meteor-80 --allow=tcp:80 --target-tags k8s-meteor-node
TODO replace the mongo image with the official mongo? https://registry.hub.docker.com/_/mongo/
TODO use Kubernetes v1beta3 syntax?

View File

@@ -0,0 +1,18 @@
FROM node:0.10
MAINTAINER Christiaan Hees <christiaan@q42.nl>
ONBUILD WORKDIR /appsrc
ONBUILD COPY . /appsrc
ONBUILD RUN curl https://install.meteor.com/ | sh && \
meteor build ../app --directory --architecture os.linux.x86_64 && \
rm -rf /appsrc
# TODO rm meteor so it doesn't take space in the image?
ONBUILD WORKDIR /app/bundle
ONBUILD RUN (cd programs/server && npm install)
EXPOSE 8080
CMD []
ENV PORT 8080
ENTRYPOINT MONGO_URL=mongodb://$MONGO_SERVICE_HOST:$MONGO_SERVICE_PORT /usr/local/bin/node main.js

View File

@@ -0,0 +1,9 @@
Building the meteor-kubernetes base image
-----------------------------------------
As a normal user you don't need to do this since the image is already built and pushed to Docker Hub. You can just use it as a base image. See [this example](https://github.com/Q42/meteor-gke-example/blob/master/Dockerfile).
To build and push the base meteor-kubernetes image:
docker build -t chees/meteor-kubernetes .
docker push chees/meteor-kubernetes

View File

@@ -0,0 +1,26 @@
{
"id": "meteor-controller",
"kind": "ReplicationController",
"apiVersion": "v1beta1",
"desiredState": {
"replicas": 2,
"replicaSelector": {"name": "meteor"},
"podTemplate": {
"desiredState": {
"manifest": {
"version": "v1beta1",
"id": "meteor-controller",
"containers": [{
"name": "meteor",
"image": "chees/meteor-gke-example:latest",
"cpu": 1000,
"memory": 500000000,
"ports": [{"name": "http-server", "containerPort": 8080, "hostPort": 80}]
}]
}
},
"labels": { "name": "meteor" }
}
},
"labels": {"name": "meteor"}
}

View File

@@ -0,0 +1,10 @@
{
"apiVersion": "v1beta1",
"kind": "Service",
"id": "meteor",
"port": 80,
"containerPort": "http-server",
"selector": { "name": "meteor" },
"createExternalLoadBalancer": true,
"sessionAffinity": "ClientIP"
}

View File

@@ -0,0 +1,33 @@
{
"id": "mongo",
"kind": "Pod",
"apiVersion": "v1beta1",
"desiredState": {
"manifest": {
"version": "v1beta1",
"id": "mongo",
"containers": [{
"name": "mongo",
"image": "mongo",
"cpu": 1000,
"ports": [{ "name": "mongo", "containerPort": 27017 }],
"volumeMounts": [{
"mountPath": "/data/db",
"name": "mongo-disk"
}]
}],
"volumes": [{
"name": "mongo-disk",
"source": {
"persistentDisk": {
"pdName": "mongo-disk",
"fsType": "ext4"
}
}
}]
}
},
"labels": {
"name": "mongo", "role": "mongo"
}
}

View File

@@ -0,0 +1,13 @@
{
"id": "mongo",
"kind": "Service",
"apiVersion": "v1beta1",
"port": 27017,
"containerPort": "mongo",
"selector": {
"name": "mongo", "role": "mongo"
},
"labels": {
"name": "mongo"
}
}

View File

@@ -0,0 +1,22 @@
{
"apiVersion": "v1beta3",
"kind": "PodTemplate",
"metadata": {
"name": "nginx"
},
"template": {
"metadata": {
"labels": {
"name": "nginx"
},
"generateName": "nginx-"
},
"spec": {
"containers": [{
"name": "nginx",
"image": "dockerfile/nginx",
"ports": [{"containerPort": 80}]
}]
}
}
}
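This template can be exercised with the same kubectl commands the hack/test-cmd.sh additions later in this commit use:

```
kubectl create -f examples/walkthrough/podtemplate.json
kubectl get podtemplates
kubectl delete podtemplate nginx
```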

View File

@@ -102,10 +102,27 @@ else
exit 1 exit 1
fi fi
# Tell kube-up.sh to skip the update, it doesn't lock. An internal
# gcloud bug can cause racing component updates to stomp on each
# other.
export KUBE_SKIP_UPDATE=y
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true
GITHASH=$(gsutil cat gs://kubernetes-release/ci/latest.txt) # The "ci" bucket is for builds like "v0.15.0-468-gfa648c1"
gsutil -m cp gs://kubernetes-release/ci/${GITHASH}/kubernetes.tar.gz gs://kubernetes-release/ci/${GITHASH}/kubernetes-test.tar.gz . bucket="ci"
# The "latest" version picks the most recent "ci" or "release" build.
version_file="latest"
if [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
# The "release" bucket is for builds like "v0.15.0"
bucket="release"
if [[ ${JENKINS_USE_STABLE:-} =~ ^[yY]$ ]]; then
# The "stable" version picks the most recent "release" build.
version_file="stable"
fi
fi
githash=$(gsutil cat gs://kubernetes-release/${bucket}/${version_file}.txt)
gsutil -m cp gs://kubernetes-release/${bucket}/${githash}/kubernetes.tar.gz gs://kubernetes-release/${bucket}/${githash}/kubernetes-test.tar.gz .
fi fi
md5sum kubernetes*.tar.gz md5sum kubernetes*.tar.gz
@@ -115,9 +132,9 @@ cd kubernetes
# Set by GKE-CI to change the CLUSTER_API_VERSION to the git version # Set by GKE-CI to change the CLUSTER_API_VERSION to the git version
if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then if [[ ! -z ${E2E_SET_CLUSTER_API_VERSION:-} ]]; then
export CLUSTER_API_VERSION=$(echo ${GITHASH} | cut -c 2-) export CLUSTER_API_VERSION=$(echo ${githash} | cut -c 2-)
elif [[ ! -z ${E2E_USE_LATEST_RELEASE_VERSION:-} ]]; then elif [[ ${JENKINS_USE_RELEASE_TARS:-} =~ ^[yY]$ ]]; then
release=$(gsutil cat gs://kubernetes-release/release/latest.txt | cut -c 2-) release=$(gsutil cat gs://kubernetes-release/release/${version_file}.txt | cut -c 2-)
export CLUSTER_API_VERSION=${release} export CLUSTER_API_VERSION=${release}
fi fi

View File

@@ -358,6 +358,34 @@ for version in "${kube_api_versions[@]}"; do
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" '' kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
#################
# Pod templates #
#################
# Note: pod templates exist only in v1beta3 and above, so output will always be in that form
### Create PODTEMPLATE
# Pre-condition: no PODTEMPLATE
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
# Command
kubectl create -f examples/walkthrough/podtemplate.json "${kube_flags[@]}"
# Post-condition: nginx PODTEMPLATE is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
### Printing pod templates works
kubectl get podtemplates "${kube_flags[@]}"
### Display of an object which doesn't exist in v1beta1 and v1beta2 works
[[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
### Delete nginx pod template by name
# Pre-condition: nginx pod template is available
kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
# Command
kubectl delete podtemplate nginx "${kube_flags[@]}"
# Post-condition: No templates exist
kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
############ ############
# Services # # Services #
############ ############

View File

@@ -80,6 +80,10 @@ func TestRESTMapper(t *testing.T) {
t.Errorf("unexpected version mapping: %s %s %v", v, k, err) t.Errorf("unexpected version mapping: %s %s %v", v, k, err)
} }
if m, err := RESTMapper.RESTMapping("PodTemplate", ""); err != nil || m.APIVersion != "v1beta3" || m.Resource != "podtemplates" {
t.Errorf("unexpected version mapping: %#v %v", m, err)
}
for _, version := range Versions { for _, version := range Versions {
mapping, err := RESTMapper.RESTMapping("ReplicationController", version) mapping, err := RESTMapper.RESTMapping("ReplicationController", version)
if err != nil { if err != nil {

View File

@@ -28,6 +28,8 @@ func init() {
&Pod{}, &Pod{},
&PodList{}, &PodList{},
&PodStatusResult{}, &PodStatusResult{},
&PodTemplate{},
&PodTemplateList{},
&ReplicationControllerList{}, &ReplicationControllerList{},
&ReplicationController{}, &ReplicationController{},
&ServiceList{}, &ServiceList{},
@@ -71,6 +73,8 @@ func init() {
func (*Pod) IsAnAPIObject() {} func (*Pod) IsAnAPIObject() {}
func (*PodList) IsAnAPIObject() {} func (*PodList) IsAnAPIObject() {}
func (*PodStatusResult) IsAnAPIObject() {} func (*PodStatusResult) IsAnAPIObject() {}
func (*PodTemplate) IsAnAPIObject() {}
func (*PodTemplateList) IsAnAPIObject() {}
func (*ReplicationController) IsAnAPIObject() {} func (*ReplicationController) IsAnAPIObject() {}
func (*ReplicationControllerList) IsAnAPIObject() {} func (*ReplicationControllerList) IsAnAPIObject() {}
func (*Service) IsAnAPIObject() {} func (*Service) IsAnAPIObject() {}

View File

@@ -179,7 +179,7 @@ func (t *Tester) TestCreateRejectsMismatchedNamespace(valid runtime.Object) {
if err == nil { if err == nil {
t.Errorf("Expected an error, but we didn't get one") t.Errorf("Expected an error, but we didn't get one")
} else if strings.Contains(err.Error(), "Controller.Namespace does not match the provided context") { } else if strings.Contains(err.Error(), "Controller.Namespace does not match the provided context") {
t.Errorf("Expected 'Controller.Namespace does not match the provided context' error, got '%v'", err.Error()) t.Errorf("Expected 'Controller.Namespace does not match the provided context' error, got '%v'", err)
} }
} }
@@ -195,7 +195,30 @@ func (t *Tester) TestCreateRejectsNamespace(valid runtime.Object) {
if err == nil { if err == nil {
t.Errorf("Expected an error, but we didn't get one") t.Errorf("Expected an error, but we didn't get one")
} else if strings.Contains(err.Error(), "Controller.Namespace does not match the provided context") { } else if strings.Contains(err.Error(), "Controller.Namespace does not match the provided context") {
t.Errorf("Expected 'Controller.Namespace does not match the provided context' error, got '%v'", err.Error()) t.Errorf("Expected 'Controller.Namespace does not match the provided context' error, got '%v'", err)
}
}
func (t *Tester) TestUpdate(valid runtime.Object, existing, older runtime.Object) {
t.TestUpdateFailsOnNotFound(copyOrDie(valid))
t.TestUpdateFailsOnVersion(copyOrDie(older))
}
func (t *Tester) TestUpdateFailsOnNotFound(valid runtime.Object) {
_, _, err := t.storage.(rest.Updater).Update(api.NewDefaultContext(), valid)
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !errors.IsNotFound(err) {
t.Errorf("Expected NotFound error, got '%v'", err)
}
}
func (t *Tester) TestUpdateFailsOnVersion(older runtime.Object) {
_, _, err := t.storage.(rest.Updater).Update(api.NewDefaultContext(), older)
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !errors.IsConflict(err) {
t.Errorf("Expected Conflict error, got '%v'", err)
} }
} }

View File

@@ -85,13 +85,20 @@ func roundTrip(t *testing.T, codec runtime.Codec, item runtime.Object) {
} }
// roundTripSame verifies the same source object is tested in all API versions. // roundTripSame verifies the same source object is tested in all API versions.
func roundTripSame(t *testing.T, item runtime.Object) { func roundTripSame(t *testing.T, item runtime.Object, except ...string) {
set := util.NewStringSet(except...)
seed := rand.Int63() seed := rand.Int63()
fuzzInternalObject(t, "", item, seed) fuzzInternalObject(t, "", item, seed)
if !set.Has("v1beta1") {
roundTrip(t, v1beta1.Codec, item) roundTrip(t, v1beta1.Codec, item)
}
if !set.Has("v1beta2") {
roundTrip(t, v1beta2.Codec, item) roundTrip(t, v1beta2.Codec, item)
}
if !set.Has("v1beta3") {
fuzzInternalObject(t, "v1beta3", item, seed) fuzzInternalObject(t, "v1beta3", item, seed)
roundTrip(t, v1beta3.Codec, item) roundTrip(t, v1beta3.Codec, item)
}
} }
func roundTripAll(t *testing.T, item runtime.Object) { func roundTripAll(t *testing.T, item runtime.Object) {
@@ -130,6 +137,10 @@ func TestList(t *testing.T) {
var nonRoundTrippableTypes = util.NewStringSet("ContainerManifest", "ContainerManifestList") var nonRoundTrippableTypes = util.NewStringSet("ContainerManifest", "ContainerManifestList")
var nonInternalRoundTrippableTypes = util.NewStringSet("List", "ListOptions", "PodExecOptions") var nonInternalRoundTrippableTypes = util.NewStringSet("List", "ListOptions", "PodExecOptions")
var nonRoundTrippableTypesByVersion = map[string][]string{
"PodTemplate": {"v1beta1", "v1beta2"},
"PodTemplateList": {"v1beta1", "v1beta2"},
}
func TestRoundTripTypes(t *testing.T) { func TestRoundTripTypes(t *testing.T) {
// api.Scheme.Log(t) // api.Scheme.Log(t)
@@ -148,7 +159,7 @@ func TestRoundTripTypes(t *testing.T) {
if _, err := meta.TypeAccessor(item); err != nil { if _, err := meta.TypeAccessor(item); err != nil {
t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err) t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err)
} }
roundTripSame(t, item) roundTripSame(t, item, nonRoundTrippableTypesByVersion[kind]...)
if !nonInternalRoundTrippableTypes.Has(kind) { if !nonInternalRoundTrippableTypes.Has(kind) {
roundTrip(t, api.Codec, fuzzInternalObject(t, "", item, rand.Int63())) roundTrip(t, api.Codec, fuzzInternalObject(t, "", item, rand.Int63()))
} }
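The per-kind exception list above simply skips the codecs of API versions a type does not exist in. Below is a minimal, standalone sketch of that pattern; it is only an illustration, using a tiny map-based set in place of the repository's util.StringSet and plain strings in place of runtime objects.

package main

import "fmt"

// stringSet is a tiny stand-in for util.StringSet.
type stringSet map[string]struct{}

func newStringSet(items ...string) stringSet {
	s := stringSet{}
	for _, i := range items {
		s[i] = struct{}{}
	}
	return s
}

func (s stringSet) Has(item string) bool { _, ok := s[item]; return ok }

// roundTripSame mimics the test helper: run the round trip for every
// version except the ones listed in 'except'.
func roundTripSame(item string, except ...string) {
	skip := newStringSet(except...)
	for _, version := range []string{"v1beta1", "v1beta2", "v1beta3"} {
		if skip.Has(version) {
			continue
		}
		fmt.Printf("round-tripping %s through %s\n", item, version)
	}
}

func main() {
	// PodTemplate only exists in v1beta3, so the older codecs are skipped.
	roundTripSame("PodTemplate", "v1beta1", "v1beta2")
}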

View File

@@ -848,8 +848,8 @@ type PodTemplate struct {
TypeMeta `json:",inline"` TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"` ObjectMeta `json:"metadata,omitempty"`
// Spec defines the pods that will be created from this template // Template defines the pods that will be created from this pod template
Spec PodTemplateSpec `json:"spec,omitempty"` Template PodTemplateSpec `json:"template,omitempty"`
} }
// PodTemplateList is a list of PodTemplates. // PodTemplateList is a list of PodTemplates.

View File

@@ -854,8 +854,8 @@ type PodTemplate struct {
TypeMeta `json:",inline"` TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty" description:"standard object metadata; see https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#metadata"` ObjectMeta `json:"metadata,omitempty" description:"standard object metadata; see https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#metadata"`
// Spec defines the behavior of a pod. // Template defines the pods that will be created from this pod template
Spec PodTemplateSpec `json:"spec,omitempty" description:"specification of the desired behavior of the pod; https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#spec-and-status"` Template PodTemplateSpec `json:"template,omitempty" description:"the template of the desired behavior of the pod; https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#spec-and-status"`
} }
// PodTemplateList is a list of PodTemplates. // PodTemplateList is a list of PodTemplates.

View File

@@ -740,7 +740,7 @@ func validateContainers(containers []api.Container, volumes util.StringSet) errs
cErrs = append(cErrs, validateEnv(ctr.Env).Prefix("env")...) cErrs = append(cErrs, validateEnv(ctr.Env).Prefix("env")...)
cErrs = append(cErrs, validateVolumeMounts(ctr.VolumeMounts, volumes).Prefix("volumeMounts")...) cErrs = append(cErrs, validateVolumeMounts(ctr.VolumeMounts, volumes).Prefix("volumeMounts")...)
cErrs = append(cErrs, validatePullPolicy(&ctr).Prefix("pullPolicy")...) cErrs = append(cErrs, validatePullPolicy(&ctr).Prefix("pullPolicy")...)
cErrs = append(cErrs, validateResourceRequirements(&ctr).Prefix("resources")...) cErrs = append(cErrs, ValidateResourceRequirements(&ctr.Resources).Prefix("resources")...)
allErrs = append(allErrs, cErrs.PrefixIndex(i)...) allErrs = append(allErrs, cErrs.PrefixIndex(i)...)
} }
// Check for colliding ports across all containers. // Check for colliding ports across all containers.
@@ -888,6 +888,24 @@ func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) errs.ValidationErrorList {
return allErrs return allErrs
} }
// ValidatePodTemplate tests if required fields in the pod template are set.
func ValidatePodTemplate(pod *api.PodTemplate) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
allErrs = append(allErrs, ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName).Prefix("metadata")...)
allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, 0).Prefix("template")...)
return allErrs
}
// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta).Prefix("metadata")...)
allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, 0).Prefix("template")...)
return allErrs
}
var supportedSessionAffinityType = util.NewStringSet(string(api.AffinityTypeClientIP), string(api.AffinityTypeNone)) var supportedSessionAffinityType = util.NewStringSet(string(api.AffinityTypeClientIP), string(api.AffinityTypeNone))
// ValidateService tests if required fields in the service are set. // ValidateService tests if required fields in the service are set.
@@ -1167,9 +1185,9 @@ func validateBasicResource(quantity resource.Quantity) errs.ValidationErrorList
} }
// Validates resource requirement spec. // Validates resource requirement spec.
func validateResourceRequirements(container *api.Container) errs.ValidationErrorList { func ValidateResourceRequirements(requirements *api.ResourceRequirements) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{} allErrs := errs.ValidationErrorList{}
for resourceName, quantity := range container.Resources.Limits { for resourceName, quantity := range requirements.Limits {
// Validate resource name. // Validate resource name.
errs := validateResourceName(resourceName.String(), fmt.Sprintf("resources.limits[%s]", resourceName)) errs := validateResourceName(resourceName.String(), fmt.Sprintf("resources.limits[%s]", resourceName))
if api.IsStandardResourceName(resourceName.String()) { if api.IsStandardResourceName(resourceName.String()) {
@@ -1177,7 +1195,14 @@ func validateResourceRequirements(container *api.Container) errs.ValidationError
} }
allErrs = append(allErrs, errs...) allErrs = append(allErrs, errs...)
} }
for resourceName, quantity := range requirements.Requests {
// Validate resource name.
errs := validateResourceName(resourceName.String(), fmt.Sprintf("resources.requests[%s]", resourceName))
if api.IsStandardResourceName(resourceName.String()) {
errs = append(errs, validateBasicResource(quantity).Prefix(fmt.Sprintf("Resource %s: ", resourceName))...)
}
allErrs = append(allErrs, errs...)
}
return allErrs return allErrs
} }
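The exported ValidateResourceRequirements now walks both the Limits and the Requests maps with the same per-resource checks. The standalone sketch below shows that shape only; the resource representation and the non-negativity check are illustrative stand-ins, not the repository's actual validation rules.

package main

import "fmt"

// resourceRequirements mirrors the two maps validated above.
type resourceRequirements struct {
	Limits   map[string]int64
	Requests map[string]int64
}

// validateQuantities applies the same check to one map and prefixes every
// error with the field name (limits or requests).
func validateQuantities(field string, quantities map[string]int64) []error {
	var errs []error
	for name, qty := range quantities {
		if qty < 0 {
			errs = append(errs, fmt.Errorf("%s[%s]: must be non-negative, got %d", field, name, qty))
		}
	}
	return errs
}

// validateResourceRequirements runs the shared check over both maps, the way
// the exported validator now covers Limits and Requests.
func validateResourceRequirements(r *resourceRequirements) []error {
	errs := validateQuantities("resources.limits", r.Limits)
	errs = append(errs, validateQuantities("resources.requests", r.Requests)...)
	return errs
}

func main() {
	r := &resourceRequirements{
		Limits:   map[string]int64{"cpu": 10},
		Requests: map[string]int64{"cpu": -10}, // mirrors the "Resource Requests CPU invalid" test case
	}
	for _, err := range validateResourceRequirements(r) {
		fmt.Println(err)
	}
}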

View File

@@ -954,6 +954,16 @@ func TestValidateContainers(t *testing.T) {
ImagePullPolicy: "IfNotPresent", ImagePullPolicy: "IfNotPresent",
}, },
}, },
"Resource Requests CPU invalid": {
{
Name: "abc-123",
Image: "image",
Resources: api.ResourceRequirements{
Requests: getResourceLimits("-10", "0"),
},
ImagePullPolicy: "IfNotPresent",
},
},
"Resource Memory invalid": { "Resource Memory invalid": {
{ {
Name: "abc-123", Name: "abc-123",
@@ -1666,7 +1676,7 @@ func TestValidateService(t *testing.T) {
func TestValidateReplicationControllerUpdate(t *testing.T) { func TestValidateReplicationControllerUpdate(t *testing.T) {
validSelector := map[string]string{"a": "b"} validSelector := map[string]string{"a": "b"}
validPodTemplate := api.PodTemplate{ validPodTemplate := api.PodTemplate{
Spec: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Labels: validSelector, Labels: validSelector,
}, },
@@ -1678,7 +1688,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
}, },
} }
readWriteVolumePodTemplate := api.PodTemplate{ readWriteVolumePodTemplate := api.PodTemplate{
Spec: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Labels: validSelector, Labels: validSelector,
}, },
@@ -1692,7 +1702,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
} }
invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"}
invalidPodTemplate := api.PodTemplate{ invalidPodTemplate := api.PodTemplate{
Spec: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
Spec: api.PodSpec{ Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst, DNSPolicy: api.DNSClusterFirst,
@@ -1712,7 +1722,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
update: api.ReplicationController{ update: api.ReplicationController{
@@ -1720,7 +1730,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 3, Replicas: 3,
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
}, },
@@ -1729,7 +1739,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
update: api.ReplicationController{ update: api.ReplicationController{
@@ -1737,7 +1747,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 1, Replicas: 1,
Selector: validSelector, Selector: validSelector,
Template: &readWriteVolumePodTemplate.Spec, Template: &readWriteVolumePodTemplate.Template,
}, },
}, },
}, },
@@ -1755,7 +1765,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
update: api.ReplicationController{ update: api.ReplicationController{
@@ -1763,7 +1773,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 2, Replicas: 2,
Selector: validSelector, Selector: validSelector,
Template: &readWriteVolumePodTemplate.Spec, Template: &readWriteVolumePodTemplate.Template,
}, },
}, },
}, },
@@ -1772,7 +1782,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
update: api.ReplicationController{ update: api.ReplicationController{
@@ -1780,7 +1790,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 2, Replicas: 2,
Selector: invalidSelector, Selector: invalidSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
}, },
@@ -1789,7 +1799,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
update: api.ReplicationController{ update: api.ReplicationController{
@@ -1797,7 +1807,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 2, Replicas: 2,
Selector: validSelector, Selector: validSelector,
Template: &invalidPodTemplate.Spec, Template: &invalidPodTemplate.Template,
}, },
}, },
}, },
@@ -1806,7 +1816,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
update: api.ReplicationController{ update: api.ReplicationController{
@@ -1814,7 +1824,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: -1, Replicas: -1,
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
}, },
@@ -1830,7 +1840,7 @@ func TestValidateReplicationControllerUpdate(t *testing.T) {
func TestValidateReplicationController(t *testing.T) { func TestValidateReplicationController(t *testing.T) {
validSelector := map[string]string{"a": "b"} validSelector := map[string]string{"a": "b"}
validPodTemplate := api.PodTemplate{ validPodTemplate := api.PodTemplate{
Spec: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Labels: validSelector, Labels: validSelector,
}, },
@@ -1842,7 +1852,7 @@ func TestValidateReplicationController(t *testing.T) {
}, },
} }
readWriteVolumePodTemplate := api.PodTemplate{ readWriteVolumePodTemplate := api.PodTemplate{
Spec: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Labels: validSelector, Labels: validSelector,
}, },
@@ -1856,7 +1866,7 @@ func TestValidateReplicationController(t *testing.T) {
} }
invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"}
invalidPodTemplate := api.PodTemplate{ invalidPodTemplate := api.PodTemplate{
Spec: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
Spec: api.PodSpec{ Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst, DNSPolicy: api.DNSClusterFirst,
@@ -1871,14 +1881,14 @@ func TestValidateReplicationController(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
{ {
@@ -1886,7 +1896,7 @@ func TestValidateReplicationController(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 1, Replicas: 1,
Selector: validSelector, Selector: validSelector,
Template: &readWriteVolumePodTemplate.Spec, Template: &readWriteVolumePodTemplate.Template,
}, },
}, },
} }
@@ -1901,27 +1911,27 @@ func TestValidateReplicationController(t *testing.T) {
ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
"missing-namespace": { "missing-namespace": {
ObjectMeta: api.ObjectMeta{Name: "abc-123"}, ObjectMeta: api.ObjectMeta{Name: "abc-123"},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
"empty selector": { "empty selector": {
ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
"selector_doesnt_match": { "selector_doesnt_match": {
ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault},
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
"invalid manifest": { "invalid manifest": {
@@ -1935,7 +1945,7 @@ func TestValidateReplicationController(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 2, Replicas: 2,
Selector: validSelector, Selector: validSelector,
Template: &readWriteVolumePodTemplate.Spec, Template: &readWriteVolumePodTemplate.Template,
}, },
}, },
"negative_replicas": { "negative_replicas": {
@@ -1955,7 +1965,7 @@ func TestValidateReplicationController(t *testing.T) {
}, },
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Selector: validSelector, Selector: validSelector,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
"invalid_label 2": { "invalid_label 2": {
@@ -1967,7 +1977,20 @@ func TestValidateReplicationController(t *testing.T) {
}, },
}, },
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Template: &invalidPodTemplate.Spec, Template: &invalidPodTemplate.Template,
},
},
"invalid_annotation": {
ObjectMeta: api.ObjectMeta{
Name: "abc-123",
Namespace: api.NamespaceDefault,
Annotations: map[string]string{
"NoUppercaseOrSpecialCharsLike=Equals": "bar",
},
},
Spec: api.ReplicationControllerSpec{
Selector: validSelector,
Template: &validPodTemplate.Template,
}, },
}, },
"invalid restart policy 1": { "invalid restart policy 1": {

View File

@@ -104,8 +104,7 @@ func RateLimit(rl util.RateLimiter, handler http.Handler) http.Handler {
func tooManyRequests(w http.ResponseWriter) { func tooManyRequests(w http.ResponseWriter) {
// Return a 429 status indicating "Too Many Requests" // Return a 429 status indicating "Too Many Requests"
w.Header().Set("Retry-After", RetryAfter) w.Header().Set("Retry-After", RetryAfter)
w.WriteHeader(errors.StatusTooManyRequests) http.Error(w, "Too many requests, please try again later.", errors.StatusTooManyRequests)
fmt.Fprintf(w, "Too many requests, please try again later.")
} }
// RecoverPanics wraps an http Handler to recover and log panics. // RecoverPanics wraps an http Handler to recover and log panics.
@@ -113,8 +112,7 @@ func RecoverPanics(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
defer func() { defer func() {
if x := recover(); x != nil { if x := recover(); x != nil {
w.WriteHeader(http.StatusInternalServerError) http.Error(w, "apis panic. Look in log for details.", http.StatusInternalServerError)
fmt.Fprint(w, "apis panic. Look in log for details.")
glog.Infof("APIServer panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, x, debug.Stack()) glog.Infof("APIServer panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
} }
}() }()
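Both handlers above switch from a manual WriteHeader plus Fprintf to http.Error, which sets the status code, a text/plain content type, and a trailing newline in one call. A minimal sketch of the 429 handler under that pattern follows; note the Retry-After value is illustrative, and the standard library's http.StatusTooManyRequests stands in for the repository's errors.StatusTooManyRequests constant.

package main

import (
	"log"
	"net/http"
)

const retryAfterSeconds = "1" // illustrative value

// tooManyRequests answers a throttled request with 429 and a Retry-After
// hint, using http.Error instead of WriteHeader + Fprintf.
func tooManyRequests(w http.ResponseWriter) {
	w.Header().Set("Retry-After", retryAfterSeconds)
	http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		tooManyRequests(w)
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}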

View File

@@ -30,7 +30,7 @@ type PodsNamespacer interface {
// PodInterface has methods to work with Pod resources. // PodInterface has methods to work with Pod resources.
type PodInterface interface { type PodInterface interface {
List(selector labels.Selector) (*api.PodList, error) List(label labels.Selector, field fields.Selector) (*api.PodList, error)
Get(name string) (*api.Pod, error) Get(name string) (*api.Pod, error)
Delete(name string) error Delete(name string) error
Create(pod *api.Pod) (*api.Pod, error) Create(pod *api.Pod) (*api.Pod, error)
@@ -54,10 +54,10 @@ func newPods(c *Client, namespace string) *pods {
} }
} }
// List takes a selector, and returns the list of pods that match that selector. // List takes label and field selectors, and returns the list of pods that match those selectors.
func (c *pods) List(selector labels.Selector) (result *api.PodList, err error) { func (c *pods) List(label labels.Selector, field fields.Selector) (result *api.PodList, err error) {
result = &api.PodList{} result = &api.PodList{}
err = c.r.Get().Namespace(c.ns).Resource("pods").LabelsSelectorParam(selector).Do().Into(result) err = c.r.Get().Namespace(c.ns).Resource("pods").LabelsSelectorParam(label).FieldsSelectorParam(field).Do().Into(result)
return return
} }
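The client change widens List from a single label selector to a label selector plus a field selector, and callers throughout the tree now pass fields.Everything() where they have no field filter. The standalone sketch below shows that calling convention only; the selector type is a simplified stand-in for the labels and fields packages, and "host" is the only field modelled.

package main

import "fmt"

// selector is a simplified stand-in for labels.Selector / fields.Selector.
type selector map[string]string

// everything mirrors labels.Everything() / fields.Everything(): no filter.
func everything() selector { return selector{} }

// matches returns true when every requirement in the selector is met.
func (s selector) matches(values map[string]string) bool {
	for k, v := range s {
		if values[k] != v {
			return false
		}
	}
	return true
}

type pod struct {
	Name   string
	Labels map[string]string
	Host   string
}

// listPods mimics the new PodInterface.List signature: filter by labels
// first, then by fields.
func listPods(pods []pod, label, field selector) []pod {
	var out []pod
	for _, p := range pods {
		if label.matches(p.Labels) && field.matches(map[string]string{"host": p.Host}) {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	pods := []pod{
		{Name: "web-1", Labels: map[string]string{"app": "web"}, Host: "node0"},
		{Name: "web-2", Labels: map[string]string{"app": "web"}, Host: "node1"},
	}
	// Equivalent of List(labels.Everything(), fields.Everything()).
	fmt.Println(len(listPods(pods, everything(), everything()))) // 2
	// Equivalent of a label selector plus a host field selector.
	fmt.Println(listPods(pods, selector{"app": "web"}, selector{"host": "node0"}))
}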

View File

@@ -22,6 +22,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
) )
@@ -31,7 +32,7 @@ func TestListEmptyPods(t *testing.T) {
Request: testRequest{Method: "GET", Path: testapi.ResourcePath("pods", ns, ""), Query: buildQueryValues(ns, nil)}, Request: testRequest{Method: "GET", Path: testapi.ResourcePath("pods", ns, ""), Query: buildQueryValues(ns, nil)},
Response: Response{StatusCode: 200, Body: &api.PodList{}}, Response: Response{StatusCode: 200, Body: &api.PodList{}},
} }
podList, err := c.Setup().Pods(ns).List(labels.Everything()) podList, err := c.Setup().Pods(ns).List(labels.Everything(), fields.Everything())
c.Validate(t, podList, err) c.Validate(t, podList, err)
} }
@@ -57,7 +58,7 @@ func TestListPods(t *testing.T) {
}, },
}, },
} }
receivedPodList, err := c.Setup().Pods(ns).List(labels.Everything()) receivedPodList, err := c.Setup().Pods(ns).List(labels.Everything(), fields.Everything())
c.Validate(t, receivedPodList, err) c.Validate(t, receivedPodList, err)
} }
@@ -91,7 +92,7 @@ func TestListPodsLabels(t *testing.T) {
c.Setup() c.Setup()
c.QueryValidator[labelSelectorQueryParamName] = validateLabels c.QueryValidator[labelSelectorQueryParamName] = validateLabels
selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector()
receivedPodList, err := c.Pods(ns).List(selector) receivedPodList, err := c.Pods(ns).List(selector, fields.Everything())
c.Validate(t, receivedPodList, err) c.Validate(t, receivedPodList, err)
} }

View File

@@ -183,21 +183,13 @@ func (pf *PortForwarder) forward() error {
return nil return nil
} }
// listenOnPort creates a new listener on port and waits for new connections // listenOnPort delegates listener creation and waits for new connections
// in the background. // in the background.
func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error { func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
listener, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port.Local)) listener, err := pf.getListener("tcp", "localhost", port)
if err != nil { if err != nil {
return err return err
} }
parts := strings.Split(listener.Addr().String(), ":")
localPort, err := strconv.ParseUint(parts[1], 10, 16)
if err != nil {
return fmt.Errorf("Error parsing local part: %s", err)
}
port.Local = uint16(localPort)
glog.Infof("Forwarding from %d -> %d", localPort, port.Remote)
pf.listeners = append(pf.listeners, listener) pf.listeners = append(pf.listeners, listener)
go pf.waitForConnection(listener, *port) go pf.waitForConnection(listener, *port)
@@ -205,6 +197,27 @@ func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
return nil return nil
} }
// getListener creates a listener on the interface targeted by the given hostname on the given port with
// the given protocol. protocol is in net.Listen style, i.e. it accepts values such as tcp, tcp4, tcp6
func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
listener, err := net.Listen(protocol, fmt.Sprintf("%s:%d", hostname, port.Local))
if err != nil {
glog.Errorf("Unable to create listener: Error %s", err)
return nil, err
}
listenerAddress := listener.Addr().String()
host, localPort, _ := net.SplitHostPort(listenerAddress)
localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host)
}
port.Local = uint16(localPortUInt)
glog.Infof("Forwarding from %d -> %d", localPortUInt, port.Remote)
return listener, nil
}
// waitForConnection waits for new connections to listener and handles them in // waitForConnection waits for new connections to listener and handles them in
// the background. // the background.
func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) { func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
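getListener leans on the standard library for the part the old code did by hand: net.SplitHostPort copes with IPv6 addresses, and reading the listener's own address recovers the real port when the caller asked for port 0. The standalone sketch below shows that sequence under those assumptions; it binds an ephemeral localhost port directly rather than going through a ForwardedPort struct.

package main

import (
	"fmt"
	"log"
	"net"
	"strconv"
)

// getListener opens a listener and reports the port it actually bound, which
// matters when the requested local port is 0 (pick any free port).
func getListener(protocol, hostname string, localPort uint16) (net.Listener, uint16, error) {
	listener, err := net.Listen(protocol, fmt.Sprintf("%s:%d", hostname, localPort))
	if err != nil {
		return nil, 0, err
	}
	// SplitHostPort handles "[::1]:12345" as well as "127.0.0.1:12345".
	_, portString, err := net.SplitHostPort(listener.Addr().String())
	if err != nil {
		listener.Close()
		return nil, 0, err
	}
	port, err := strconv.ParseUint(portString, 10, 16)
	if err != nil {
		listener.Close()
		return nil, 0, fmt.Errorf("error parsing local port %q: %v", portString, err)
	}
	return listener, uint16(port), nil
}

func main() {
	listener, port, err := getListener("tcp4", "localhost", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
	fmt.Printf("listening on 127.0.0.1:%d\n", port)
}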

View File

@@ -209,6 +209,82 @@ func (s *fakeUpgradeStream) Headers() http.Header {
return http.Header{} return http.Header{}
} }
func TestGetListener(t *testing.T) {
var pf PortForwarder
testCases := []struct {
Hostname string
Protocol string
ShouldRaiseError bool
ExpectedListenerAddress string
}{
{
Hostname: "localhost",
Protocol: "tcp4",
ShouldRaiseError: false,
ExpectedListenerAddress: "127.0.0.1",
},
{
Hostname: "127.0.0.1",
Protocol: "tcp4",
ShouldRaiseError: false,
ExpectedListenerAddress: "127.0.0.1",
},
{
Hostname: "[::1]",
Protocol: "tcp6",
ShouldRaiseError: false,
ExpectedListenerAddress: "::1",
},
{
Hostname: "localhost",
Protocol: "tcp6",
ShouldRaiseError: false,
ExpectedListenerAddress: "::1",
},
{
Hostname: "[::1]",
Protocol: "tcp4",
ShouldRaiseError: true,
},
{
Hostname: "127.0.0.1",
Protocol: "tcp6",
ShouldRaiseError: true,
},
}
for i, testCase := range testCases {
expectedListenerPort := "12345"
listener, err := pf.getListener(testCase.Protocol, testCase.Hostname, &ForwardedPort{12345, 12345})
errorRaised := err != nil
if testCase.ShouldRaiseError != errorRaised {
t.Errorf("Test case #%d failed: Data %v an error has been raised(%t) where it should not (or reciprocally): %v", i, testCase, testCase.ShouldRaiseError, err)
continue
}
if errorRaised {
continue
}
if listener == nil {
t.Errorf("Test case #%d did not raised an error (%t) but failed in initializing listener", i, err)
continue
}
host, port, _ := net.SplitHostPort(listener.Addr().String())
t.Logf("Asked a %s forward for: %s:%v, got listener %s:%s, expected: %s", testCase.Protocol, testCase.Hostname, 12345, host, port, expectedListenerPort)
if host != testCase.ExpectedListenerAddress {
t.Errorf("Test case #%d failed: Listener does not listen on exepected address: asked %v got %v", i, testCase.ExpectedListenerAddress, host)
}
if port != expectedListenerPort {
t.Errorf("Test case #%d failed: Listener does not listen on exepected port: asked %v got %v", i, expectedListenerPort, port)
}
listener.Close()
}
}
func TestForwardPorts(t *testing.T) { func TestForwardPorts(t *testing.T) {
testCases := []struct { testCases := []struct {
Upgrader *fakeUpgrader Upgrader *fakeUpgrader
@@ -313,4 +389,5 @@ func TestForwardPorts(t *testing.T) {
t.Fatalf("%d: expected conn closure", i) t.Fatalf("%d: expected conn closure", i)
} }
} }
} }

View File

@@ -30,7 +30,7 @@ type FakePods struct {
Namespace string Namespace string
} }
func (c *FakePods) List(selector labels.Selector) (*api.PodList, error) { func (c *FakePods) List(label labels.Selector, field fields.Selector) (*api.PodList, error) {
obj, err := c.Fake.Invokes(FakeAction{Action: "list-pods"}, &api.PodList{}) obj, err := c.Fake.Invokes(FakeAction{Action: "list-pods"}, &api.PodList{})
return obj.(*api.PodList), err return obj.(*api.PodList), err
} }

View File

@@ -18,6 +18,7 @@ package cloudprovider
import ( import (
"net" "net"
"strings"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
) )
@@ -44,8 +45,15 @@ type Clusters interface {
// TODO(#6812): Use a shorter name that's less likely to be longer than cloud // TODO(#6812): Use a shorter name that's less likely to be longer than cloud
// providers' name length limits. // providers' name length limits.
func GetLoadBalancerName(clusterName, serviceNamespace, serviceName string) string { func GetLoadBalancerName(service *api.Service) string {
return clusterName + "-" + serviceNamespace + "-" + serviceName //GCE requires that the name of a load balancer starts with a lower case letter.
ret := "a" + string(service.UID)
ret = strings.Replace(ret, "-", "", -1)
//AWS requires that the name of a load balancer is shorter than 32 bytes.
if len(ret) > 32 {
ret = ret[:32]
}
return ret
} }
// TCPLoadBalancer is an abstract, pluggable interface for TCP load balancers. // TCPLoadBalancer is an abstract, pluggable interface for TCP load balancers.
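The new naming rule is entirely derivable from the service UID: prefix a lower-case letter for GCE, strip the dashes, and truncate to 32 bytes for AWS. The standalone sketch below reproduces that derivation and the expected name used in the node controller test later in this change; as a simplification it takes the UID as a plain string rather than types.UID.

package main

import (
	"fmt"
	"strings"
)

// loadBalancerName derives a cloud load balancer name from a service UID:
// prefix "a" so GCE gets a leading lower-case letter, drop dashes, and
// truncate to 32 bytes to satisfy AWS's name length limit.
func loadBalancerName(uid string) string {
	name := "a" + uid
	name = strings.Replace(name, "-", "", -1)
	if len(name) > 32 {
		name = name[:32]
	}
	return name
}

func main() {
	// Matches the expectation in the node controller test:
	// a2c104a7ce79e11e4818742010af0068
	fmt.Println(loadBalancerName("2c104a7c-e79e-11e4-8187-42010af0068a"))
}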

View File

@@ -266,7 +266,7 @@ func (nc *NodeController) reconcileExternalServices(nodes *api.NodeList) (should
glog.Errorf("External load balancers for non TCP services are not currently supported: %v.", service) glog.Errorf("External load balancers for non TCP services are not currently supported: %v.", service)
continue continue
} }
name := cloudprovider.GetLoadBalancerName(nc.clusterName, service.Namespace, service.Name) name := cloudprovider.GetLoadBalancerName(&service)
err := balancer.UpdateTCPLoadBalancer(name, zone.Region, hosts) err := balancer.UpdateTCPLoadBalancer(name, zone.Region, hosts)
if err != nil { if err != nil {
glog.Errorf("External error while updating TCP load balancer: %v.", err) glog.Errorf("External error while updating TCP load balancer: %v.", err)
@@ -652,7 +652,7 @@ func (nc *NodeController) getCloudNodesWithSpec() (*api.NodeList, error) {
func (nc *NodeController) deletePods(nodeID string) error { func (nc *NodeController) deletePods(nodeID string) error {
glog.V(2).Infof("Delete all pods from %v", nodeID) glog.V(2).Infof("Delete all pods from %v", nodeID)
// TODO: We don't yet have field selectors from client, see issue #1362. // TODO: We don't yet have field selectors from client, see issue #1362.
pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything()) pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
if err != nil { if err != nil {
return err return err
} }

View File

@@ -32,6 +32,7 @@ import (
fake_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake" fake_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
) )
@@ -609,7 +610,7 @@ func TestSyncCloudNodesReconcilesExternalService(t *testing.T) {
// Set of nodes does not change: do nothing. // Set of nodes does not change: do nothing.
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0"), newNode("node1")}, Existing: []*api.Node{newNode("node0"), newNode("node1")},
Fake: testclient.NewSimpleFake(&api.ServiceList{Items: []api.Service{*newService("service0", true), *newService("service1", false)}})}, Fake: testclient.NewSimpleFake(&api.ServiceList{Items: []api.Service{*newService("service0", types.UID(""), true), *newService("service1", types.UID(""), false)}})},
fakeCloud: &fake_cloud.FakeCloud{ fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0", "node1"}, Machines: []string{"node0", "node1"},
}, },
@@ -621,28 +622,28 @@ func TestSyncCloudNodesReconcilesExternalService(t *testing.T) {
// Delete "node1", target pool for "service0" should shrink. // Delete "node1", target pool for "service0" should shrink.
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0"), newNode("node1")}, Existing: []*api.Node{newNode("node0"), newNode("node1")},
Fake: testclient.NewSimpleFake(&api.ServiceList{Items: []api.Service{*newService("service0", true), *newService("service1", false)}})}, Fake: testclient.NewSimpleFake(&api.ServiceList{Items: []api.Service{*newService("service0", types.UID("2c104a7c-e79e-11e4-8187-42010af0068a"), true), *newService("service1", types.UID(""), false)}})},
fakeCloud: &fake_cloud.FakeCloud{ fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0"}, Machines: []string{"node0"},
}, },
matchRE: ".*", matchRE: ".*",
expectedClientActions: []testclient.FakeAction{{Action: "list-pods"}, {Action: "list-services"}}, expectedClientActions: []testclient.FakeAction{{Action: "list-pods"}, {Action: "list-services"}},
expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{ expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
{Name: "kubernetes-namespace-service0", Hosts: []string{"node0"}}, {Name: "a2c104a7ce79e11e4818742010af0068", Hosts: []string{"node0"}},
}, },
}, },
{ {
// Add "node1", target pool for "service0" should grow. // Add "node1", target pool for "service0" should grow.
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0")}, Existing: []*api.Node{newNode("node0")},
Fake: testclient.NewSimpleFake(&api.ServiceList{Items: []api.Service{*newService("service0", true), *newService("service1", false)}})}, Fake: testclient.NewSimpleFake(&api.ServiceList{Items: []api.Service{*newService("service0", types.UID("2c104a7c-e79e-11e4-8187-42010af0068a"), true), *newService("service1", types.UID(""), false)}})},
fakeCloud: &fake_cloud.FakeCloud{ fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0", "node1"}, Machines: []string{"node0", "node1"},
}, },
matchRE: ".*", matchRE: ".*",
expectedClientActions: []testclient.FakeAction{{Action: "list-services"}}, expectedClientActions: []testclient.FakeAction{{Action: "list-services"}},
expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{ expectedUpdateCalls: []fake_cloud.FakeUpdateBalancerCall{
{Name: "kubernetes-namespace-service0", Hosts: []string{"node0", "node1"}}, {Name: "a2c104a7ce79e11e4818742010af0068", Hosts: []string{"node0", "node1"}},
}, },
}, },
} }
@@ -1128,8 +1129,8 @@ func newPod(name, host string) *api.Pod {
return &api.Pod{ObjectMeta: api.ObjectMeta{Name: name}, Spec: api.PodSpec{Host: host}} return &api.Pod{ObjectMeta: api.ObjectMeta{Name: name}, Spec: api.PodSpec{Host: host}}
} }
func newService(name string, external bool) *api.Service { func newService(name string, uid types.UID, external bool) *api.Service {
return &api.Service{ObjectMeta: api.ObjectMeta{Name: name, Namespace: "namespace"}, Spec: api.ServiceSpec{CreateExternalLoadBalancer: external}} return &api.Service{ObjectMeta: api.ObjectMeta{Name: name, Namespace: "namespace", UID: uid}, Spec: api.ServiceSpec{CreateExternalLoadBalancer: external}}
} }
func sortedNodeNames(nodes []*api.Node) []string { func sortedNodeNames(nodes []*api.Node) []string {

View File

@@ -419,7 +419,7 @@ func needsUpdate(oldService *api.Service, newService *api.Service) bool {
} }
func (s *ServiceController) loadBalancerName(service *api.Service) string { func (s *ServiceController) loadBalancerName(service *api.Service) string {
return cloudprovider.GetLoadBalancerName(s.clusterName, service.Namespace, service.Name) return cloudprovider.GetLoadBalancerName(service)
} }
func getTCPPorts(service *api.Service) ([]int, error) { func getTCPPorts(service *api.Service) ([]int, error) {

View File

@@ -223,7 +223,7 @@ func (s activePods) Less(i, j int) bool {
func (rm *ReplicationManager) syncReplicationController(controller api.ReplicationController) error { func (rm *ReplicationManager) syncReplicationController(controller api.ReplicationController) error {
s := labels.Set(controller.Spec.Selector).AsSelector() s := labels.Set(controller.Spec.Selector).AsSelector()
podList, err := rm.kubeClient.Pods(controller.Namespace).List(s) podList, err := rm.kubeClient.Pods(controller.Namespace).List(s, fields.Everything())
if err != nil { if err != nil {
return err return err
} }

View File

@@ -0,0 +1,97 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conversion_test
import (
"io/ioutil"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3"
)
func BenchmarkPodConversion(b *testing.B) {
data, err := ioutil.ReadFile("pod_example.json")
if err != nil {
b.Fatalf("unexpected error while reading file: %v", err)
}
var pod api.Pod
if err := api.Scheme.DecodeInto(data, &pod); err != nil {
b.Fatalf("unexpected error decoding pod: %v", err)
}
scheme := api.Scheme.Raw()
for i := 0; i < b.N; i++ {
versionedObj, err := scheme.ConvertToVersion(&pod, testapi.Version())
if err != nil {
b.Fatalf("Conversion error: %v", err)
}
_, err = scheme.ConvertToVersion(versionedObj, scheme.InternalVersion)
if err != nil {
b.Fatalf("Conversion error: %v", err)
}
}
}
func BenchmarkNodeConversion(b *testing.B) {
data, err := ioutil.ReadFile("node_example.json")
if err != nil {
b.Fatalf("unexpected error while reading file: %v", err)
}
var node api.Node
if err := api.Scheme.DecodeInto(data, &node); err != nil {
b.Fatalf("unexpected error decoding node: %v", err)
}
scheme := api.Scheme.Raw()
for i := 0; i < b.N; i++ {
versionedObj, err := scheme.ConvertToVersion(&node, testapi.Version())
if err != nil {
b.Fatalf("Conversion error: %v", err)
}
_, err = scheme.ConvertToVersion(versionedObj, scheme.InternalVersion)
if err != nil {
b.Fatalf("Conversion error: %v", err)
}
}
}
func BenchmarkReplicationControllerConversion(b *testing.B) {
data, err := ioutil.ReadFile("replication_controller_example.json")
if err != nil {
b.Fatalf("unexpected error while reading file: %v", err)
}
var replicationController api.ReplicationController
if err := api.Scheme.DecodeInto(data, &replicationController); err != nil {
b.Fatalf("unexpected error decoding node: %v", err)
}
scheme := api.Scheme.Raw()
for i := 0; i < b.N; i++ {
versionedObj, err := scheme.ConvertToVersion(&replicationController, testapi.Version())
if err != nil {
b.Fatalf("Conversion error: %v", err)
}
_, err = scheme.ConvertToVersion(versionedObj, scheme.InternalVersion)
if err != nil {
b.Fatalf("Conversion error: %v", err)
}
}
}

View File

@@ -0,0 +1,49 @@
{
"kind": "Node",
"apiVersion": "v1beta3",
"metadata": {
"name": "e2e-test-wojtekt-minion-etd6",
"selfLink": "/api/v1beta1/nodes/e2e-test-wojtekt-minion-etd6",
"uid": "a7e89222-e8e5-11e4-8fde-42010af09327",
"resourceVersion": "379",
"creationTimestamp": "2015-04-22T11:49:39Z"
},
"spec": {
"externalID": "15488322946290398375"
},
"status": {
"capacity": {
"cpu": "1",
"memory": "1745152Ki"
},
"conditions": [
{
"type": "Ready",
"status": "True",
"lastHeartbeatTime": "2015-04-22T11:58:17Z",
"lastTransitionTime": "2015-04-22T11:49:52Z",
"reason": "kubelet is posting ready status"
}
],
"addresses": [
{
"type": "ExternalIP",
"address": "104.197.49.213"
},
{
"type": "LegacyHostIP",
"address": "104.197.20.11"
}
],
"nodeInfo": {
"machineID": "",
"systemUUID": "D59FA3FA-7B5B-7287-5E1A-1D79F13CB577",
"bootID": "44a832f3-8cfb-4de5-b7d2-d66030b6cd95",
"kernelVersion": "3.16.0-0.bpo.4-amd64",
"osImage": "Debian GNU/Linux 7 (wheezy)",
"containerRuntimeVersion": "docker://1.5.0",
"kubeletVersion": "v0.15.0-484-g0c8ee980d705a3-dirty",
"KubeProxyVersion": "v0.15.0-484-g0c8ee980d705a3-dirty"
}
}
}

View File

@@ -0,0 +1,102 @@
{
"kind": "Pod",
"apiVersion": "v1beta3",
"metadata": {
"name": "etcd-server-e2e-test-wojtekt-master",
"namespace": "default",
"selfLink": "/api/v1beta1/pods/etcd-server-e2e-test-wojtekt-master?namespace=default",
"uid": "a671734a-e8e5-11e4-8fde-42010af09327",
"resourceVersion": "22",
"creationTimestamp": "2015-04-22T11:49:36Z",
"annotations": {
"kubernetes.io/config.mirror": "mirror",
"kubernetes.io/config.source": "file"
}
},
"spec": {
"volumes": [
{
"name": "varetcd",
"hostPath": {
"path": "/mnt/master-pd/var/etcd"
},
"emptyDir": null,
"gcePersistentDisk": null,
"awsElasticBlockStore": null,
"gitRepo": null,
"secret": null,
"nfs": null,
"iscsi": null,
"glusterfs": null
}
],
"containers": [
{
"name": "etcd-container",
"image": "gcr.io/google_containers/etcd:2.0.9",
"command": [
"/usr/local/bin/etcd",
"--addr",
"127.0.0.1:4001",
"--bind-addr",
"127.0.0.1:4001",
"--data-dir",
"/var/etcd/data"
],
"ports": [
{
"name": "serverport",
"hostPort": 2380,
"containerPort": 2380,
"protocol": "TCP"
},
{
"name": "clientport",
"hostPort": 4001,
"containerPort": 4001,
"protocol": "TCP"
}
],
"resources": {},
"volumeMounts": [
{
"name": "varetcd",
"mountPath": "/var/etcd"
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent",
"capabilities": {}
}
],
"restartPolicy": "Always",
"dnsPolicy": "ClusterFirst",
"host": "e2e-test-wojtekt-master",
"hostNetwork": true
},
"status": {
"phase": "Running",
"Condition": [
{
"type": "Ready",
"status": "True"
}
],
"containerStatuses": [
{
"name": "etcd-container",
"state": {
"running": {
"startedAt": "2015-04-22T11:49:32Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "gcr.io/google_containers/etcd:2.0.9",
"imageID": "docker://b6b9a86dc06aa1361357ca1b105feba961f6a4145adca6c54e142c0be0fe87b0",
"containerID": "docker://3cbbf818f1addfc252957b4504f56ef2907a313fe6afc47fc75373674255d46d"
}
]
}
}

View File

@@ -0,0 +1,82 @@
{
"kind": "ReplicationController",
"apiVersion": "v1beta3",
"metadata": {
"name": "elasticsearch-logging-controller",
"namespace": "default",
"selfLink": "/api/v1beta1/replicationControllers/elasticsearch-logging-controller?namespace=default",
"uid": "aa76f162-e8e5-11e4-8fde-42010af09327",
"resourceVersion": "98",
"creationTimestamp": "2015-04-22T11:49:43Z",
"labels": {
"kubernetes.io/cluster-service": "true",
"name": "elasticsearch-logging"
}
},
"spec": {
"replicas": 1,
"selector": {
"name": "elasticsearch-logging"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"kubernetes.io/cluster-service": "true",
"name": "elasticsearch-logging"
}
},
"spec": {
"volumes": [
{
"name": "es-persistent-storage",
"hostPath": null,
"emptyDir": {
"medium": ""
},
"gcePersistentDisk": null,
"awsElasticBlockStore": null,
"gitRepo": null,
"secret": null,
"nfs": null,
"iscsi": null,
"glusterfs": null
}
],
"containers": [
{
"name": "elasticsearch-logging",
"image": "gcr.io/google_containers/elasticsearch:1.0",
"ports": [
{
"name": "es-port",
"containerPort": 9200,
"protocol": "TCP"
},
{
"name": "es-transport-port",
"containerPort": 9300,
"protocol": "TCP"
}
],
"resources": {},
"volumeMounts": [
{
"name": "es-persistent-storage",
"mountPath": "/data"
}
],
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent",
"capabilities": {}
}
],
"restartPolicy": "Always",
"dnsPolicy": "ClusterFirst"
}
}
},
"status": {
"replicas": 1
}
}

View File

@@ -26,6 +26,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/golang/glog" "github.com/golang/glog"
@@ -479,7 +480,7 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
} }
var pods []*api.Pod var pods []*api.Pod
allPods, err := d.Pods(namespace).List(labels.Everything()) allPods, err := d.Pods(namespace).List(labels.Everything(), fields.Everything())
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -613,7 +614,7 @@ func printReplicationControllersByLabels(matchingRCs []api.ReplicationController
} }
func getPodStatusForReplicationController(c client.PodInterface, controller *api.ReplicationController) (running, waiting, succeeded, failed int, err error) { func getPodStatusForReplicationController(c client.PodInterface, controller *api.ReplicationController) (running, waiting, succeeded, failed int, err error) {
rcPods, err := c.List(labels.SelectorFromSet(controller.Spec.Selector)) rcPods, err := c.List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything())
if err != nil { if err != nil {
return return
} }

View File

@@ -32,6 +32,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/docker/docker/pkg/units" "github.com/docker/docker/pkg/units"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
@@ -243,7 +244,8 @@ func (h *HumanReadablePrinter) HandledResources() []string {
return keys return keys
} }
var podColumns = []string{"POD", "IP", "CONTAINER(S)", "IMAGE(S)", "HOST", "LABELS", "STATUS", "CREATED"} var podColumns = []string{"POD", "IP", "CONTAINER(S)", "IMAGE(S)", "HOST", "LABELS", "STATUS", "CREATED", "MESSAGE"}
var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"}
var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS"} var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS"}
var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP", "PORT(S)"} var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP", "PORT(S)"}
var endpointColumns = []string{"NAME", "ENDPOINTS"} var endpointColumns = []string{"NAME", "ENDPOINTS"}
@@ -262,6 +264,8 @@ var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"}
func (h *HumanReadablePrinter) addDefaultHandlers() { func (h *HumanReadablePrinter) addDefaultHandlers() {
h.Handler(podColumns, printPod) h.Handler(podColumns, printPod)
h.Handler(podColumns, printPodList) h.Handler(podColumns, printPodList)
h.Handler(podTemplateColumns, printPodTemplate)
h.Handler(podTemplateColumns, printPodTemplateList)
h.Handler(replicationControllerColumns, printReplicationController) h.Handler(replicationControllerColumns, printReplicationController)
h.Handler(replicationControllerColumns, printReplicationControllerList) h.Handler(replicationControllerColumns, printReplicationControllerList)
h.Handler(serviceColumns, printService) h.Handler(serviceColumns, printService)
@@ -339,32 +343,92 @@ func podHostString(host, ip string) string {
return host + "/" + ip return host + "/" + ip
} }
// translateTimestamp returns the elapsed time since timestamp in
// human-readable approximation.
func translateTimestamp(timestamp util.Time) string {
return units.HumanDuration(time.Now().Sub(timestamp.Time))
}
// interpretContainerStatus interprets the container status and returns strings
// associated with columns "STATUS", "CREATED", and "MESSAGE".
// The meaning of MESSAGE varies based on the context of STATUS:
// STATUS: Waiting; MESSAGE: reason for waiting
// STATUS: Running; MESSAGE: reason for the last termination
// STATUS: Terminated; MESSAGE: reason for this termination
func interpretContainerStatus(status *api.ContainerStatus) (string, string, string, error) {
// Helper function to compose a meaningful message from a termination state.
getTermMsg := func(state *api.ContainerStateTerminated) string {
var message string
if state != nil {
message = fmt.Sprintf("exit code %d", state.ExitCode)
if state.Reason != "" {
message = fmt.Sprintf("%s, reason: %s", state.Reason)
}
}
return message
}
state := &status.State
if state.Waiting != nil {
return "Waiting", "", state.Waiting.Reason, nil
} else if state.Running != nil {
// Get the information of the last termination state. This is useful if
// a container is stuck in a crash loop.
message := getTermMsg(status.LastTerminationState.Termination)
if message != "" {
message = "last termination: " + message
}
return "Running", translateTimestamp(state.Running.StartedAt), message, nil
} else if state.Termination != nil {
return "Terminated", translateTimestamp(state.Termination.StartedAt), getTermMsg(state.Termination), nil
}
return "", "", "", fmt.Errorf("unknown container state %#v", *state)
}
func printPod(pod *api.Pod, w io.Writer) error { func printPod(pod *api.Pod, w io.Writer) error {
// TODO: remove me when pods are converted _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
spec := &api.PodSpec{}
if err := api.Scheme.Convert(&pod.Spec, spec); err != nil {
glog.Errorf("Unable to convert pod manifest: %v", err)
}
containers := spec.Containers
var firstContainer api.Container
if len(containers) > 0 {
firstContainer, containers = containers[0], containers[1:]
}
_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
pod.Name, pod.Name,
pod.Status.PodIP, pod.Status.PodIP,
firstContainer.Name, "", "",
firstContainer.Image,
podHostString(pod.Spec.Host, pod.Status.HostIP), podHostString(pod.Spec.Host, pod.Status.HostIP),
formatLabels(pod.Labels), formatLabels(pod.Labels),
pod.Status.Phase, pod.Status.Phase,
units.HumanDuration(time.Now().Sub(pod.CreationTimestamp.Time))) translateTimestamp(pod.CreationTimestamp),
pod.Status.Message,
)
if err != nil { if err != nil {
return err return err
} }
// Lay out all the other containers on separate lines. // Lay out all containers on separate lines.
for _, container := range containers { statuses := pod.Status.ContainerStatuses
_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "", "", container.Name, container.Image, "", "", "", "") if len(statuses) == 0 {
// Container status has not been reported yet. Print basic information
// of the containers and exit the function.
for _, container := range pod.Spec.Containers {
_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
"", "", container.Name, container.Image, "", "", "", "")
if err != nil {
return err
}
}
return nil
}
// Print the actual container statuses.
for _, status := range statuses {
state, created, message, err := interpretContainerStatus(&status)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
"", "",
status.Name,
status.Image,
"", "",
state,
created,
message,
)
if err != nil { if err != nil {
return err return err
} }
@@ -381,6 +445,40 @@ func printPodList(podList *api.PodList, w io.Writer) error {
return nil return nil
} }
func printPodTemplate(pod *api.PodTemplate, w io.Writer) error {
containers := pod.Template.Spec.Containers
var firstContainer api.Container
if len(containers) > 0 {
firstContainer, containers = containers[0], containers[1:]
}
_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
pod.Name,
firstContainer.Name,
firstContainer.Image,
formatLabels(pod.Template.Labels),
)
if err != nil {
return err
}
// Lay out all the other containers on separate lines.
for _, container := range containers {
_, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", "", container.Name, container.Image, "")
if err != nil {
return err
}
}
return nil
}
func printPodTemplateList(podList *api.PodTemplateList, w io.Writer) error {
for _, pod := range podList.Items {
if err := printPodTemplate(&pod, w); err != nil {
return err
}
}
return nil
}
func printReplicationController(controller *api.ReplicationController, w io.Writer) error { func printReplicationController(controller *api.ReplicationController, w io.Writer) error {
containers := controller.Spec.Template.Spec.Containers containers := controller.Spec.Template.Spec.Containers
var firstContainer api.Container var firstContainer api.Container

View File

@@ -0,0 +1,32 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package container
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
)
// HandlerRunner runs a lifecycle handler for a container.
type HandlerRunner interface {
Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error
}
// Prober checks the healthiness of a container.
type Prober interface {
Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error)
}
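For orientation, a minimal test double is sketched below; it is not part of this commit. It shows the smallest type that satisfies the new Prober interface, the way a kubelet unit test might stub probing. The package name fake, the fakeProber type, and its fields are made-up names.
package fake
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
)
// fakeProber is a hypothetical stub that always reports a canned result.
type fakeProber struct {
result probe.Result
err    error
}
// Probe ignores its arguments and returns the fixed result and error.
func (f *fakeProber) Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
return f.result, f.err
}
// Compile-time assertion that fakeProber implements the new interface.
var _ kubecontainer.Prober = &fakeProber{}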

View File

@@ -25,11 +25,20 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
) )
type Version interface {
// Compare compares two versions of the runtime. On success it returns -1
// if the version is less than the other, 1 if it is greater than the other,
// or 0 if they are equal.
Compare(other string) (int, error)
// String returns a string that represents the version.
String() string
}
// Runtime interface defines the interfaces that should be implemented // Runtime interface defines the interfaces that should be implemented
// by a container runtime. // by a container runtime.
type Runtime interface { type Runtime interface {
// Version returns a map of version information of the container runtime. // Version returns the version information of the container runtime.
Version() (map[string]string, error) Version() (Version, error)
// GetPods returns a list containers group by pods. The boolean parameter // GetPods returns a list containers group by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already // specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection). // exited and dead containers (used for garbage collection).

View File

@@ -120,7 +120,8 @@ type dockerContainerCommandRunner struct {
var dockerAPIVersionWithExec, _ = docker.NewAPIVersion("1.15") var dockerAPIVersionWithExec, _ = docker.NewAPIVersion("1.15")
// Returns the major and minor version numbers of docker server. // Returns the major and minor version numbers of docker server.
func (d *dockerContainerCommandRunner) GetDockerServerVersion() (docker.APIVersion, error) {
// TODO(yifan): Remove this once the ContainerCommandRunner is implemented by dockerManager.
func (d *dockerContainerCommandRunner) getDockerServerVersion() (docker.APIVersion, error) {
env, err := d.client.Version() env, err := d.client.Version()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get docker server version - %v", err) return nil, fmt.Errorf("failed to get docker server version - %v", err)
@@ -136,7 +137,7 @@ func (d *dockerContainerCommandRunner) GetDockerServerVersion() (docker.APIVersi
} }
func (d *dockerContainerCommandRunner) nativeExecSupportExists() (bool, error) { func (d *dockerContainerCommandRunner) nativeExecSupportExists() (bool, error) {
version, err := d.GetDockerServerVersion() version, err := d.getDockerServerVersion()
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -295,7 +296,11 @@ func (d *dockerContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port
} }
containerPid := container.State.Pid containerPid := container.State.Pid
// TODO use exec.LookPath for socat / what if the host doesn't have it??? // TODO what if the host doesn't have it???
_, lookupErr := exec.LookPath("socat")
if lookupErr != nil {
return fmt.Errorf("Unable to do port forwarding: socat not found.")
}
args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", "socat", "-", fmt.Sprintf("TCP4:localhost:%d", port)} args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", "socat", "-", fmt.Sprintf("TCP4:localhost:%d", port)}
// TODO use exec.LookPath // TODO use exec.LookPath
command := exec.Command("nsenter", args...) command := exec.Command("nsenter", args...)
@@ -479,7 +484,6 @@ func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface {
// TODO(yifan): Move this to container.Runtime. // TODO(yifan): Move this to container.Runtime.
type ContainerCommandRunner interface { type ContainerCommandRunner interface {
RunInContainer(containerID string, cmd []string) ([]byte, error) RunInContainer(containerID string, cmd []string) ([]byte, error)
GetDockerServerVersion() (docker.APIVersion, error)
ExecInContainer(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error ExecInContainer(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error
PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error
} }

View File

@@ -129,10 +129,10 @@ func TestContainerManifestNaming(t *testing.T) {
} }
} }
func TestGetDockerServerVersion(t *testing.T) { func TestVersion(t *testing.T) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}} fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}}
runner := dockerContainerCommandRunner{fakeDocker} manager := &DockerManager{client: fakeDocker}
version, err := runner.GetDockerServerVersion() version, err := manager.Version()
if err != nil { if err != nil {
t.Errorf("got error while getting docker server version - %s", err) t.Errorf("got error while getting docker server version - %s", err)
} }

View File

@@ -336,6 +336,8 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) {
continue continue
} }
var containerStatus api.ContainerStatus var containerStatus api.ContainerStatus
containerStatus.Name = container.Name
containerStatus.Image = container.Image
if oldStatus, found := oldStatuses[container.Name]; found { if oldStatus, found := oldStatuses[container.Name]; found {
// Some states may be lost due to GC; apply the last observed // Some states may be lost due to GC; apply the last observed
// values if possible. // values if possible.
@@ -655,3 +657,43 @@ func (dm *DockerManager) PodInfraContainerChanged(pod *api.Pod, podInfraContaine
} }
return podInfraContainer.Hash != HashContainer(expectedPodInfraContainer), nil return podInfraContainer.Hash != HashContainer(expectedPodInfraContainer), nil
} }
type dockerVersion docker.APIVersion
func NewVersion(input string) (dockerVersion, error) {
version, err := docker.NewAPIVersion(input)
return dockerVersion(version), err
}
func (dv dockerVersion) String() string {
return docker.APIVersion(dv).String()
}
func (dv dockerVersion) Compare(other string) (int, error) {
a := docker.APIVersion(dv)
b, err := docker.NewAPIVersion(other)
if err != nil {
return 0, err
}
if a.LessThan(b) {
return -1, nil
}
if a.GreaterThan(b) {
return 1, nil
}
return 0, nil
}
func (dm *DockerManager) Version() (kubecontainer.Version, error) {
env, err := dm.client.Version()
if err != nil {
return nil, fmt.Errorf("docker: failed to get docker version: %v", err)
}
apiVersion := env.Get("ApiVersion")
version, err := docker.NewAPIVersion(apiVersion)
if err != nil {
return nil, fmt.Errorf("docker: failed to parse docker server version %q: %v", apiVersion, err)
}
return dockerVersion(version), nil
}
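As a usage sketch (not part of the diff), the snippet below shows how a caller can gate on a minimum runtime version through the new kubecontainer.Version interface, reusing dockertools.NewVersion from the hunk above. The requireAtLeast helper and the sample versions are hypothetical; the "1.15" threshold mirrors the kubelet health-check change later in this diff.
package main
import (
"fmt"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
)
// requireAtLeast is a hypothetical helper: it rejects runtimes whose version
// is older than min, using only the kubecontainer.Version interface.
func requireAtLeast(v kubecontainer.Version, min string) error {
result, err := v.Compare(min)
if err != nil {
return err
}
if result < 0 {
return fmt.Errorf("runtime version %s is older than required %s", v.String(), min)
}
return nil
}
func main() {
v, err := dockertools.NewVersion("1.15")
if err != nil {
fmt.Println("parse error:", err)
return
}
fmt.Println(requireAtLeast(v, "1.15")) // <nil>
fmt.Println(requireAtLeast(v, "1.18")) // version-too-old error
}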

View File

@@ -22,15 +22,12 @@ import (
"strconv" "strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog" "github.com/golang/glog"
) )
type HandlerRunner interface {
Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error
}
type handlerRunner struct { type handlerRunner struct {
httpGetter httpGetter httpGetter httpGetter
commandRunner dockertools.ContainerCommandRunner commandRunner dockertools.ContainerCommandRunner
@@ -38,7 +35,7 @@ type handlerRunner struct {
} }
// TODO(yifan): Merge commandRunner and containerManager once containerManager implements the ContainerCommandRunner interface. // TODO(yifan): Merge commandRunner and containerManager once containerManager implements the ContainerCommandRunner interface.
func NewHandlerRunner(httpGetter httpGetter, commandRunner dockertools.ContainerCommandRunner, containerManager *dockertools.DockerManager) *handlerRunner { func newHandlerRunner(httpGetter httpGetter, commandRunner dockertools.ContainerCommandRunner, containerManager *dockertools.DockerManager) kubecontainer.HandlerRunner {
return &handlerRunner{ return &handlerRunner{
httpGetter: httpGetter, httpGetter: httpGetter,
commandRunner: commandRunner, commandRunner: commandRunner,

View File

@@ -232,8 +232,8 @@ func NewMainKubelet(
} }
klet.podManager = newBasicPodManager(klet.kubeClient) klet.podManager = newBasicPodManager(klet.kubeClient)
klet.prober = NewProber(klet.runner, klet.readinessManager, klet.containerRefManager, klet.recorder) klet.prober = newProber(klet.runner, klet.readinessManager, klet.containerRefManager, klet.recorder)
klet.handlerRunner = NewHandlerRunner(klet.httpClient, klet.runner, klet.containerManager) klet.handlerRunner = newHandlerRunner(klet.httpClient, klet.runner, klet.containerManager)
runtimeCache, err := kubecontainer.NewRuntimeCache(containerManager) runtimeCache, err := kubecontainer.NewRuntimeCache(containerManager)
if err != nil { if err != nil {
@@ -317,10 +317,10 @@ type Kubelet struct {
networkPlugin network.NetworkPlugin networkPlugin network.NetworkPlugin
// Healthy check prober. // Healthy check prober.
prober *Prober prober kubecontainer.Prober
// Container lifecycle handler runner. // Container lifecycle handler runner.
handlerRunner HandlerRunner handlerRunner kubecontainer.HandlerRunner
// Container readiness state manager. // Container readiness state manager.
readinessManager *kubecontainer.ReadinessManager readinessManager *kubecontainer.ReadinessManager
@@ -1060,20 +1060,16 @@ type podContainerChangesSpec struct {
containersToKeep map[dockertools.DockerID]int containersToKeep map[dockertools.DockerID]int
} }
func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubecontainer.Pod) (podContainerChangesSpec, error) { func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubecontainer.Pod, podStatus api.PodStatus) (podContainerChangesSpec, error) {
podFullName := kubecontainer.GetPodFullName(pod) podFullName := kubecontainer.GetPodFullName(pod)
uid := pod.UID uid := pod.UID
glog.V(4).Infof("Syncing Pod %+v, podFullName: %q, uid: %q", pod, podFullName, uid) glog.V(4).Infof("Syncing Pod %+v, podFullName: %q, uid: %q", pod, podFullName, uid)
err := kl.makePodDataDirs(pod)
if err != nil {
return podContainerChangesSpec{}, err
}
containersToStart := make(map[int]empty) containersToStart := make(map[int]empty)
containersToKeep := make(map[dockertools.DockerID]int) containersToKeep := make(map[dockertools.DockerID]int)
createPodInfraContainer := false createPodInfraContainer := false
var err error
var podInfraContainerID dockertools.DockerID var podInfraContainerID dockertools.DockerID
var changed bool var changed bool
podInfraContainer := runningPod.FindContainerByName(dockertools.PodInfraContainerName) podInfraContainer := runningPod.FindContainerByName(dockertools.PodInfraContainerName)
@@ -1097,18 +1093,6 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubeconta
containersToKeep[podInfraContainerID] = -1 containersToKeep[podInfraContainerID] = -1
} }
// Do not use the cache here since we need the newest status to check
// if we need to restart the container below.
pod, found := kl.GetPodByFullName(podFullName)
if !found {
return podContainerChangesSpec{}, fmt.Errorf("couldn't find pod %q", podFullName)
}
podStatus, err := kl.generatePodStatus(pod)
if err != nil {
glog.Errorf("Unable to get pod with name %q and uid %q info with error(%v)", podFullName, uid, err)
return podContainerChangesSpec{}, err
}
for index, container := range pod.Spec.Containers { for index, container := range pod.Spec.Containers {
expectedHash := dockertools.HashContainer(&container) expectedHash := dockertools.HashContainer(&container)
@@ -1213,7 +1197,18 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
return err return err
} }
containerChanges, err := kl.computePodContainerChanges(pod, runningPod)
if err := kl.makePodDataDirs(pod); err != nil {
glog.Errorf("Unable to make pod data directories for pod %q (uid %q): %v", podFullName, uid, err)
return err
}
podStatus, err := kl.generatePodStatus(pod)
if err != nil {
glog.Errorf("Unable to get status for pod %q (uid %q): %v", podFullName, uid, err)
return err
}
containerChanges, err := kl.computePodContainerChanges(pod, runningPod, podStatus)
glog.V(3).Infof("Got container changes for pod %q: %+v", podFullName, containerChanges) glog.V(3).Infof("Got container changes for pod %q: %+v", podFullName, containerChanges)
if err != nil { if err != nil {
return err return err
@@ -1658,13 +1653,12 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
} }
} }
// Returns Docker version for this Kubelet. // Returns the container runtime version for this Kubelet.
func (kl *Kubelet) GetDockerVersion() (docker.APIVersion, error) { func (kl *Kubelet) GetContainerRuntimeVersion() (kubecontainer.Version, error) {
if kl.dockerClient == nil { if kl.containerManager == nil {
return nil, fmt.Errorf("no Docker client") return nil, fmt.Errorf("no container runtime")
} }
dockerRunner := dockertools.NewDockerContainerCommandRunner(kl.dockerClient)
return kl.containerManager.Version()
return dockerRunner.GetDockerServerVersion()
} }
func (kl *Kubelet) validatePodPhase(podStatus *api.PodStatus) error { func (kl *Kubelet) validatePodPhase(podStatus *api.PodStatus) error {
@@ -1770,6 +1764,21 @@ func (kl *Kubelet) recordNodeOnlineEvent() {
kl.recorder.Eventf(kl.nodeRef, "online", "Node %s is now online", kl.hostname) kl.recorder.Eventf(kl.nodeRef, "online", "Node %s is now online", kl.hostname)
} }
func (kl *Kubelet) recordNodeSchedulableEvent() {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, "schedulable", "Node %s is now schedulable", kl.hostname)
}
func (kl *Kubelet) recordNodeUnschedulableEvent() {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, "unschedulable", "Node %s is now unschedulable", kl.hostname)
}
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
var oldNodeUnschedulable bool
// tryUpdateNodeStatus tries to update node status to master. // tryUpdateNodeStatus tries to update node status to master.
func (kl *Kubelet) tryUpdateNodeStatus() error { func (kl *Kubelet) tryUpdateNodeStatus() error {
node, err := kl.kubeClient.Nodes().Get(kl.hostname) node, err := kl.kubeClient.Nodes().Get(kl.hostname)
@@ -1836,6 +1845,14 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
kl.recordNodeOnlineEvent() kl.recordNodeOnlineEvent()
} }
if oldNodeUnschedulable != node.Spec.Unschedulable {
if node.Spec.Unschedulable {
kl.recordNodeUnschedulableEvent()
} else {
kl.recordNodeSchedulableEvent()
}
oldNodeUnschedulable = node.Spec.Unschedulable
}
_, err = kl.kubeClient.Nodes().UpdateStatus(node) _, err = kl.kubeClient.Nodes().UpdateStatus(node)
return err return err
} }

View File

@@ -113,8 +113,8 @@ func newTestKubelet(t *testing.T) *TestKubelet {
}, },
fakeRecorder) fakeRecorder)
kubelet.containerManager.Puller = &dockertools.FakeDockerPuller{} kubelet.containerManager.Puller = &dockertools.FakeDockerPuller{}
kubelet.prober = NewProber(nil, kubelet.readinessManager, kubelet.containerRefManager, kubelet.recorder) kubelet.prober = newProber(nil, kubelet.readinessManager, kubelet.containerRefManager, kubelet.recorder)
kubelet.handlerRunner = NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, kubelet.containerManager)
return &TestKubelet{kubelet, fakeDocker, mockCadvisor, fakeKubeClient, waitGroup, fakeMirrorClient} return &TestKubelet{kubelet, fakeDocker, mockCadvisor, fakeKubeClient, waitGroup, fakeMirrorClient}
} }
@@ -511,10 +511,10 @@ func TestSyncPodsDoesNothing(t *testing.T) {
waitGroup.Wait() waitGroup.Wait()
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
"list", "list", "list", "list",
// Check the pod infra contianer.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container", "list", "inspect_container", "inspect_container",
// Check the pod infra contianer.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container"}) "list", "inspect_container", "inspect_container"})
} }
@@ -743,10 +743,10 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) {
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
"list", "list", "list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_image", "list", "inspect_container", "inspect_image",
// Check the pod infra container.
"inspect_container",
// Create container. // Create container.
"create", "start", "create", "start",
// Get pod status. // Get pod status.
@@ -768,7 +768,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
waitGroup := testKubelet.waitGroup waitGroup := testKubelet.waitGroup
fakeHttp := fakeHTTP{} fakeHttp := fakeHTTP{}
kubelet.httpClient = &fakeHttp kubelet.httpClient = &fakeHttp
kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager)
pods := []*api.Pod{ pods := []*api.Pod{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
@@ -818,10 +818,10 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
"list", "list", "list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_image", "list", "inspect_container", "inspect_image",
// Check the pod infra container.
"inspect_container",
// Create container. // Create container.
"create", "start", "create", "start",
// Get pod status. // Get pod status.
@@ -1104,10 +1104,10 @@ func TestSyncPodsDeletesDuplicate(t *testing.T) {
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
"list", "list", "list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container", "list", "inspect_container", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Kill the duplicated container. // Kill the duplicated container.
"stop", "stop",
// Get pod status. // Get pod status.
@@ -1175,10 +1175,10 @@ func TestSyncPodsBadHash(t *testing.T) {
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
"list", "list", "list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container", "list", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Kill and restart the bad hash container. // Kill and restart the bad hash container.
"stop", "create", "start", "stop", "create", "start",
// Get pod status. // Get pod status.
@@ -1249,10 +1249,10 @@ func TestSyncPodsUnhealthy(t *testing.T) {
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
"list", "list", "list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container", "list", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Kill the unhealthy container. // Kill the unhealthy container.
"stop", "stop",
// Restart the unhealthy container. // Restart the unhealthy container.
@@ -1597,10 +1597,6 @@ func (f *fakeContainerCommandRunner) RunInContainer(id string, cmd []string) ([]
return []byte{}, f.E return []byte{}, f.E
} }
func (f *fakeContainerCommandRunner) GetDockerServerVersion() (docker.APIVersion, error) {
return nil, nil
}
func (f *fakeContainerCommandRunner) ExecInContainer(id string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error { func (f *fakeContainerCommandRunner) ExecInContainer(id string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
f.Cmd = cmd f.Cmd = cmd
f.ID = id f.ID = id
@@ -1694,7 +1690,7 @@ func TestRunHandlerExec(t *testing.T) {
kubelet := testKubelet.kubelet kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker fakeDocker := testKubelet.fakeDocker
kubelet.runner = &fakeCommandRunner kubelet.runner = &fakeCommandRunner
kubelet.handlerRunner = NewHandlerRunner(&fakeHTTP{}, kubelet.runner, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(&fakeHTTP{}, kubelet.runner, kubelet.containerManager)
containerID := "abc1234" containerID := "abc1234"
podName := "podFoo" podName := "podFoo"
@@ -1749,7 +1745,7 @@ func TestRunHandlerHttp(t *testing.T) {
testKubelet := newTestKubelet(t) testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet kubelet := testKubelet.kubelet
kubelet.httpClient = &fakeHttp kubelet.httpClient = &fakeHttp
kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager)
containerID := "abc1234" containerID := "abc1234"
podName := "podFoo" podName := "podFoo"
@@ -1817,7 +1813,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
kubelet.httpClient = &fakeHTTP{ kubelet.httpClient = &fakeHTTP{
err: fmt.Errorf("test error"), err: fmt.Errorf("test error"),
} }
kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) kubelet.handlerRunner = newHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager)
pods := []*api.Pod{ pods := []*api.Pod{
{ {
@@ -1868,10 +1864,10 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
verifyCalls(t, fakeDocker, []string{ verifyCalls(t, fakeDocker, []string{
"list", "list", "list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_image", "list", "inspect_container", "inspect_image",
// Check the pod infra container.
"inspect_container",
// Create the container. // Create the container.
"create", "start", "create", "start",
// Kill the container since event handler fails. // Kill the container since event handler fails.
@@ -3871,10 +3867,10 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) {
{ {
api.RestartPolicyAlways, api.RestartPolicyAlways,
[]string{"list", "list", []string{"list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container", "list", "inspect_container", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Restart both containers. // Restart both containers.
"create", "start", "create", "start", "create", "start", "create", "start",
// Get pod status. // Get pod status.
@@ -3885,10 +3881,10 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) {
{ {
api.RestartPolicyOnFailure, api.RestartPolicyOnFailure,
[]string{"list", "list", []string{"list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container", "list", "inspect_container", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Restart the failed container. // Restart the failed container.
"create", "start", "create", "start",
// Get pod status. // Get pod status.
@@ -3899,10 +3895,10 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) {
{ {
api.RestartPolicyNever, api.RestartPolicyNever,
[]string{"list", "list", []string{"list", "list",
// Check the pod infra container.
"inspect_container",
// Get pod status. // Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container", "list", "inspect_container", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Stop the last pod infra container. // Stop the last pod infra container.
"stop", "stop",
// Get pod status. // Get pod status.

View File

@@ -37,9 +37,8 @@ import (
const maxProbeRetries = 3 const maxProbeRetries = 3
// Prober helps to check the liveness/readiness of a container. // prober helps to check the liveness/readiness of a container.
// TODO(yifan): Replace the concrete type with interface later.
type prober struct {
type Prober struct {
exec execprobe.ExecProber exec execprobe.ExecProber
http httprobe.HTTPProber http httprobe.HTTPProber
tcp tcprobe.TCPProber tcp tcprobe.TCPProber
@@ -52,13 +51,13 @@ type Prober struct {
// NewProber creates a Prober, it takes a command runner and // NewProber creates a Prober, it takes a command runner and
// several container info managers. // several container info managers.
func NewProber( func newProber(
runner dockertools.ContainerCommandRunner, runner dockertools.ContainerCommandRunner,
readinessManager *kubecontainer.ReadinessManager, readinessManager *kubecontainer.ReadinessManager,
refManager *kubecontainer.RefManager, refManager *kubecontainer.RefManager,
recorder record.EventRecorder) *Prober { recorder record.EventRecorder) kubecontainer.Prober {
return &Prober{ return &prober{
exec: execprobe.New(), exec: execprobe.New(),
http: httprobe.New(), http: httprobe.New(),
tcp: tcprobe.New(), tcp: tcprobe.New(),
@@ -73,7 +72,7 @@ func NewProber(
// Probe checks the liveness/readiness of the given container. // Probe checks the liveness/readiness of the given container.
// If the container's liveness probe is unsuccessful, set readiness to false. // If the container's liveness probe is unsuccessful, set readiness to false.
// If liveness is successful, do a readiness check and set readiness accordingly. // If liveness is successful, do a readiness check and set readiness accordingly.
func (pb *Prober) Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { func (pb *prober) Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
// Probe liveness. // Probe liveness.
live, err := pb.probeLiveness(pod, status, container, containerID, createdAt) live, err := pb.probeLiveness(pod, status, container, containerID, createdAt)
if err != nil { if err != nil {
@@ -113,7 +112,7 @@ func (pb *Prober) Probe(pod *api.Pod, status api.PodStatus, container api.Contai
// probeLiveness probes the liveness of a container. // probeLiveness probes the liveness of a container.
// If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success. // If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success.
func (pb *Prober) probeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { func (pb *prober) probeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
p := container.LivenessProbe p := container.LivenessProbe
if p == nil { if p == nil {
return probe.Success, nil return probe.Success, nil
@@ -126,7 +125,7 @@ func (pb *Prober) probeLiveness(pod *api.Pod, status api.PodStatus, container ap
// probeReadiness probes the readiness of a container. // probeReadiness probes the readiness of a container.
// If the initial delay on the readiness probe has not passed the probe will return probe.Failure. // If the initial delay on the readiness probe has not passed the probe will return probe.Failure.
func (pb *Prober) probeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { func (pb *prober) probeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) {
p := container.ReadinessProbe p := container.ReadinessProbe
if p == nil { if p == nil {
return probe.Success, nil return probe.Success, nil
@@ -139,7 +138,7 @@ func (pb *Prober) probeReadiness(pod *api.Pod, status api.PodStatus, container a
// runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result
// if it never succeeds. // if it never succeeds.
func (pb *Prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string, retires int) (probe.Result, error) { func (pb *prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string, retires int) (probe.Result, error) {
var err error var err error
var result probe.Result var result probe.Result
for i := 0; i < retires; i++ { for i := 0; i < retires; i++ {
@@ -151,7 +150,7 @@ func (pb *Prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.Pod
return result, err return result, err
} }
func (pb *Prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) { func (pb *prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) {
timeout := time.Duration(p.TimeoutSeconds) * time.Second timeout := time.Duration(p.TimeoutSeconds) * time.Second
if p.Exec != nil { if p.Exec != nil {
glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v", pod, container) glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v", pod, container)
@@ -228,7 +227,7 @@ type execInContainer struct {
run func() ([]byte, error) run func() ([]byte, error)
} }
func (p *Prober) newExecInContainer(pod *api.Pod, container api.Container, containerID string) exec.Cmd { func (p *prober) newExecInContainer(pod *api.Pod, container api.Container, containerID string) exec.Cmd {
return execInContainer{func() ([]byte, error) { return execInContainer{func() ([]byte, error) {
return p.runner.RunInContainer(containerID, container.LivenessProbe.Exec.Command) return p.runner.RunInContainer(containerID, container.LivenessProbe.Exec.Command)
}} }}

View File

@@ -152,7 +152,7 @@ func makeTestKubelet(result probe.Result, err error) *Kubelet {
containerRefManager: kubecontainer.NewRefManager(), containerRefManager: kubecontainer.NewRefManager(),
} }
kl.prober = &Prober{ kl.prober = &prober{
exec: fakeExecProber{ exec: fakeExecProber{
result: result, result: result,
err: err, err: err,

View File

@@ -41,7 +41,6 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/flushwriter" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/flushwriter"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog" "github.com/golang/glog"
cadvisorApi "github.com/google/cadvisor/info/v1" cadvisorApi "github.com/google/cadvisor/info/v1"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@@ -101,7 +100,7 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, address net.IP, por
type HostInterface interface { type HostInterface interface {
GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
GetRootInfo(req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) GetRootInfo(req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error)
GetDockerVersion() (docker.APIVersion, error) GetContainerRuntimeVersion() (kubecontainer.Version, error)
GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error) GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error)
GetPods() []*api.Pod GetPods() []*api.Pod
GetPodByName(namespace, name string) (*api.Pod, bool) GetPodByName(namespace, name string) (*api.Pod, bool)
@@ -160,18 +159,18 @@ func (s *Server) error(w http.ResponseWriter, err error) {
http.Error(w, msg, http.StatusInternalServerError) http.Error(w, msg, http.StatusInternalServerError)
} }
func isValidDockerVersion(ver docker.APIVersion) bool {
minAllowedVersion, _ := docker.NewAPIVersion("1.15")
return ver.GreaterThanOrEqualTo(minAllowedVersion)
}
func (s *Server) dockerHealthCheck(req *http.Request) error { func (s *Server) dockerHealthCheck(req *http.Request) error {
version, err := s.host.GetDockerVersion() version, err := s.host.GetContainerRuntimeVersion()
if err != nil { if err != nil {
return errors.New("unknown Docker version") return errors.New("unknown Docker version")
} }
if !isValidDockerVersion(version) {
return fmt.Errorf("Docker version is too old (%v)", version.String())
// Verify the docker version.
result, err := version.Compare("1.15")
if err != nil {
return err
}
if result < 0 {
return fmt.Errorf("Docker version is too old: %q", version.String())
} }
return nil return nil
} }

View File

@@ -32,10 +32,11 @@ import (
"time" "time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy"
"github.com/fsouza/go-dockerclient"
cadvisorApi "github.com/google/cadvisor/info/v1" cadvisorApi "github.com/google/cadvisor/info/v1"
) )
@@ -48,7 +49,7 @@ type fakeKubelet struct {
podsFunc func() []*api.Pod podsFunc func() []*api.Pod
logFunc func(w http.ResponseWriter, req *http.Request) logFunc func(w http.ResponseWriter, req *http.Request)
runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
dockerVersionFunc func() (docker.APIVersion, error) containerVersionFunc func() (kubecontainer.Version, error)
execFunc func(pod string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error execFunc func(pod string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error
portForwardFunc func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error portForwardFunc func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error
containerLogsFunc func(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error containerLogsFunc func(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error
@@ -72,8 +73,8 @@ func (fk *fakeKubelet) GetRootInfo(req *cadvisorApi.ContainerInfoRequest) (*cadv
return fk.rootInfoFunc(req) return fk.rootInfoFunc(req)
} }
func (fk *fakeKubelet) GetDockerVersion() (docker.APIVersion, error) { func (fk *fakeKubelet) GetContainerRuntimeVersion() (kubecontainer.Version, error) {
return fk.dockerVersionFunc() return fk.containerVersionFunc()
} }
func (fk *fakeKubelet) GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error) { func (fk *fakeKubelet) GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error) {
@@ -450,8 +451,8 @@ func TestPodsInfo(t *testing.T) {
func TestHealthCheck(t *testing.T) { func TestHealthCheck(t *testing.T) {
fw := newServerTest() fw := newServerTest()
fw.fakeKubelet.dockerVersionFunc = func() (docker.APIVersion, error) { fw.fakeKubelet.containerVersionFunc = func() (kubecontainer.Version, error) {
return docker.NewAPIVersion("1.15") return dockertools.NewVersion("1.15")
} }
fw.fakeKubelet.hostnameFunc = func() string { fw.fakeKubelet.hostnameFunc = func() string {
return "127.0.0.1" return "127.0.0.1"
@@ -489,9 +490,9 @@ func TestHealthCheck(t *testing.T) {
t.Errorf("expected status code %d, got %d", http.StatusOK, resp.StatusCode) t.Errorf("expected status code %d, got %d", http.StatusOK, resp.StatusCode)
} }
//Test with old docker version //Test with old container runtime version
fw.fakeKubelet.dockerVersionFunc = func() (docker.APIVersion, error) { fw.fakeKubelet.containerVersionFunc = func() (kubecontainer.Version, error) {
return docker.NewAPIVersion("1.1") return dockertools.NewVersion("1.1")
} }
resp, err = http.Get(fw.testHTTPServer.URL + "/healthz") resp, err = http.Get(fw.testHTTPServer.URL + "/healthz")

View File

@@ -58,6 +58,7 @@ import (
pvcetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/persistentvolumeclaim/etcd" pvcetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/persistentvolumeclaim/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod"
podetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/etcd" podetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/etcd"
podtemplateetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/podtemplate/etcd"
resourcequotaetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/resourcequota/etcd" resourcequotaetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/resourcequota/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
@@ -360,9 +361,18 @@ func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter)
// init initializes master. // init initializes master.
func (m *Master) init(c *Config) { func (m *Master) init(c *Config) {
// TODO: make initialization of the helper part of the Master, and allow some storage
// objects to have a newer storage version than the user's default.
newerHelper, err := NewEtcdHelper(c.EtcdHelper.Client, "v1beta3")
if err != nil {
glog.Fatalf("Unable to setup storage for v1beta3: %v", err)
}
podStorage := podetcd.NewStorage(c.EtcdHelper, c.KubeletClient) podStorage := podetcd.NewStorage(c.EtcdHelper, c.KubeletClient)
podRegistry := pod.NewRegistry(podStorage.Pod) podRegistry := pod.NewRegistry(podStorage.Pod)
podTemplateStorage := podtemplateetcd.NewREST(newerHelper)
eventRegistry := event.NewEtcdRegistry(c.EtcdHelper, uint64(c.EventTTL.Seconds())) eventRegistry := event.NewEtcdRegistry(c.EtcdHelper, uint64(c.EventTTL.Seconds()))
limitRangeRegistry := limitrange.NewEtcdRegistry(c.EtcdHelper) limitRangeRegistry := limitrange.NewEtcdRegistry(c.EtcdHelper)
@@ -397,6 +407,8 @@ func (m *Master) init(c *Config) {
"pods/binding": podStorage.Binding, "pods/binding": podStorage.Binding,
"bindings": podStorage.Binding, "bindings": podStorage.Binding,
"podTemplates": podTemplateStorage,
"replicationControllers": controllerStorage, "replicationControllers": controllerStorage,
"services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, m.portalNet, c.ClusterName), "services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, m.portalNet, c.ClusterName),
"endpoints": endpointsStorage, "endpoints": endpointsStorage,
@@ -606,6 +618,9 @@ func (m *Master) defaultAPIGroupVersion() *apiserver.APIGroupVersion {
func (m *Master) api_v1beta1() *apiserver.APIGroupVersion { func (m *Master) api_v1beta1() *apiserver.APIGroupVersion {
storage := make(map[string]rest.Storage) storage := make(map[string]rest.Storage)
for k, v := range m.storage { for k, v := range m.storage {
if k == "podTemplates" {
continue
}
storage[k] = v storage[k] = v
} }
version := m.defaultAPIGroupVersion() version := m.defaultAPIGroupVersion()
@@ -619,6 +634,9 @@ func (m *Master) api_v1beta1() *apiserver.APIGroupVersion {
func (m *Master) api_v1beta2() *apiserver.APIGroupVersion { func (m *Master) api_v1beta2() *apiserver.APIGroupVersion {
storage := make(map[string]rest.Storage) storage := make(map[string]rest.Storage)
for k, v := range m.storage { for k, v := range m.storage {
if k == "podTemplates" {
continue
}
storage[k] = v storage[k] = v
} }
version := m.defaultAPIGroupVersion() version := m.defaultAPIGroupVersion()

View File

@@ -246,7 +246,7 @@ func deleteReplicationControllers(kubeClient client.Interface, ns string) error
} }
func deletePods(kubeClient client.Interface, ns string) error { func deletePods(kubeClient client.Interface, ns string) error {
items, err := kubeClient.Pods(ns).List(labels.Everything()) items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
if err != nil { if err != nil {
return err return err
} }

View File

@@ -60,7 +60,7 @@ func createController(storage *REST, rc api.ReplicationController, t *testing.T)
} }
var validPodTemplate = api.PodTemplate{ var validPodTemplate = api.PodTemplate{
Spec: api.PodTemplateSpec{ Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"a": "b"}, Labels: map[string]string{"a": "b"},
}, },
@@ -79,8 +79,8 @@ var validPodTemplate = api.PodTemplate{
} }
var validControllerSpec = api.ReplicationControllerSpec{ var validControllerSpec = api.ReplicationControllerSpec{
Selector: validPodTemplate.Spec.Labels, Selector: validPodTemplate.Template.Labels,
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
} }
var validController = api.ReplicationController{ var validController = api.ReplicationController{
@@ -161,7 +161,7 @@ func TestCreateControllerWithGeneratedName(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 2, Replicas: 2,
Selector: map[string]string{"a": "b"}, Selector: map[string]string{"a": "b"},
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
} }
@@ -663,7 +663,7 @@ func TestCreate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 2, Replicas: 2,
Selector: map[string]string{"a": "b"}, Selector: map[string]string{"a": "b"},
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
// invalid // invalid
@@ -671,7 +671,7 @@ func TestCreate(t *testing.T) {
Spec: api.ReplicationControllerSpec{ Spec: api.ReplicationControllerSpec{
Replicas: 2, Replicas: 2,
Selector: map[string]string{}, Selector: map[string]string{},
Template: &validPodTemplate.Spec, Template: &validPodTemplate.Template,
}, },
}, },
) )

View File

@@ -35,7 +35,6 @@ import (
) )
// podStrategy implements behavior for Pods // podStrategy implements behavior for Pods
// TODO: move to a pod specific package.
type podStrategy struct { type podStrategy struct {
runtime.ObjectTyper runtime.ObjectTyper
api.NameGenerator api.NameGenerator

View File

@@ -0,0 +1,18 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package podtemplate provides RESTStorage implementations for storing PodTemplate API objects.
package podtemplate

View File

@@ -0,0 +1,63 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic"
etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/podtemplate"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
)
// rest implements a RESTStorage for pod templates against etcd
type REST struct {
etcdgeneric.Etcd
}
// NewREST returns a RESTStorage object that will work against pod templates.
func NewREST(h tools.EtcdHelper) *REST {
prefix := "/registry/podtemplates"
store := etcdgeneric.Etcd{
NewFunc: func() runtime.Object { return &api.PodTemplate{} },
NewListFunc: func() runtime.Object { return &api.PodTemplateList{} },
KeyRootFunc: func(ctx api.Context) string {
return etcdgeneric.NamespaceKeyRootFunc(ctx, prefix)
},
KeyFunc: func(ctx api.Context, name string) (string, error) {
return etcdgeneric.NamespaceKeyFunc(ctx, prefix, name)
},
ObjectNameFunc: func(obj runtime.Object) (string, error) {
return obj.(*api.PodTemplate).Name, nil
},
PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher {
return podtemplate.MatchPodTemplate(label, field)
},
EndpointName: "podtemplates",
CreateStrategy: podtemplate.Strategy,
UpdateStrategy: podtemplate.Strategy,
ReturnDeletedObject: true,
Helper: h,
}
return &REST{store}
}
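A hypothetical wiring sketch follows; it is not part of the commit, but mirrors the master.go hunk earlier in this diff, where the new storage is built against a v1beta3 etcd helper and registered under the "podTemplates" key (skipped in the v1beta1/v1beta2 API groups). The package and function names are made up.
package registrysketch
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/rest"
podtemplateetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/podtemplate/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
)
// buildPodTemplateStorage assumes the caller already constructed an etcd
// helper that encodes with a v1beta3 codec, and exposes the pod template
// REST storage under the key the master registers it with.
func buildPodTemplateStorage(helper tools.EtcdHelper) map[string]rest.Storage {
return map[string]rest.Storage{
"podTemplates": podtemplateetcd.NewREST(helper),
}
}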

View File

@@ -0,0 +1,99 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/rest/resttest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
)
func newHelper(t *testing.T) (*tools.FakeEtcdClient, tools.EtcdHelper) {
fakeEtcdClient := tools.NewFakeEtcdClient(t)
fakeEtcdClient.TestIndex = true
helper := tools.NewEtcdHelper(fakeEtcdClient, v1beta3.Codec)
return fakeEtcdClient, helper
}
func validNewPodTemplate(name string) *api.PodTemplate {
return &api.PodTemplate{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"test": "foo"},
},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
Containers: []api.Container{
{
Name: "foo",
Image: "test",
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
},
},
},
},
}
}
func TestCreate(t *testing.T) {
fakeEtcdClient, helper := newHelper(t)
storage := NewREST(helper)
test := resttest.New(t, storage, fakeEtcdClient.SetError)
pod := validNewPodTemplate("foo")
pod.ObjectMeta = api.ObjectMeta{}
test.TestCreate(
// valid
pod,
// invalid
&api.PodTemplate{
Template: api.PodTemplateSpec{},
},
)
}
func TestUpdate(t *testing.T) {
fakeEtcdClient, helper := newHelper(t)
storage := NewREST(helper)
test := resttest.New(t, storage, fakeEtcdClient.SetError)
fakeEtcdClient.ExpectNotFoundGet("/registry/podtemplates/default/foo")
fakeEtcdClient.ChangeIndex = 2
pod := validNewPodTemplate("foo")
existing := validNewPodTemplate("exists")
obj, err := storage.Create(api.NewDefaultContext(), existing)
if err != nil {
t.Fatalf("unable to create object: %v", err)
}
older := obj.(*api.PodTemplate)
older.ResourceVersion = "1"
test.TestUpdate(
pod,
existing,
older,
)
}

View File

@@ -0,0 +1,81 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podtemplate
import (
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
errs "github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors"
)
// podTemplateStrategy implements behavior for PodTemplates
type podTemplateStrategy struct {
runtime.ObjectTyper
api.NameGenerator
}
// Strategy is the default logic that applies when creating and updating PodTemplate
// objects via the REST API.
var Strategy = podTemplateStrategy{api.Scheme, api.SimpleNameGenerator}
// NamespaceScoped is true for pod templates.
func (podTemplateStrategy) NamespaceScoped() bool {
return true
}
// PrepareForCreate clears fields that are not allowed to be set by end users on creation.
func (podTemplateStrategy) PrepareForCreate(obj runtime.Object) {
_ = obj.(*api.PodTemplate)
}
// Validate validates a new pod template.
func (podTemplateStrategy) Validate(ctx api.Context, obj runtime.Object) errs.ValidationErrorList {
pod := obj.(*api.PodTemplate)
return validation.ValidatePodTemplate(pod)
}
// AllowCreateOnUpdate is false for pod templates.
func (podTemplateStrategy) AllowCreateOnUpdate() bool {
return false
}
// PrepareForUpdate clears fields that are not allowed to be set by end users on update.
func (podTemplateStrategy) PrepareForUpdate(obj, old runtime.Object) {
_ = obj.(*api.PodTemplate)
}
// ValidateUpdate is the default update validation for an end user.
func (podTemplateStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) errs.ValidationErrorList {
return validation.ValidatePodTemplateUpdate(obj.(*api.PodTemplate), old.(*api.PodTemplate))
}
// MatchPodTemplate returns a generic matcher for a given label and field selector.
func MatchPodTemplate(label labels.Selector, field fields.Selector) generic.Matcher {
return generic.MatcherFunc(func(obj runtime.Object) (bool, error) {
podObj, ok := obj.(*api.PodTemplate)
if !ok {
return false, fmt.Errorf("not a pod template")
}
return label.Matches(labels.Set(podObj.Labels)), nil
})
}
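For illustration only, the sketch below exercises the same labels.Set and Selector.Matches calls that MatchPodTemplate relies on internally; the template, its labels, and the selector value are made-up examples.
package main
import (
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
)
func main() {
// Hypothetical template carrying the label the matcher will test against.
tpl := &api.PodTemplate{
ObjectMeta: api.ObjectMeta{
Name:   "example",
Labels: map[string]string{"test": "foo"},
},
}
// Build a selector from a label set; MatchPodTemplate receives a
// labels.Selector like this and calls Matches on the object's labels.
selector := labels.Set{"test": "foo"}.AsSelector()
if selector.Matches(labels.Set(tpl.Labels)) {
fmt.Printf("pod template %q matches selector %v\n", tpl.Name, selector)
}
}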

View File

@@ -148,7 +148,7 @@ func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err
pods := &api.PodList{} pods := &api.PodList{}
if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] { if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
pods, err = rm.kubeClient.Pods(usage.Namespace).List(labels.Everything()) pods, err = rm.kubeClient.Pods(usage.Namespace).List(labels.Everything(), fields.Everything())
if err != nil { if err != nil {
return err return err
} }
@@ -240,6 +240,28 @@ func PodCPU(pod *api.Pod) *resource.Quantity {
return resource.NewMilliQuantity(int64(val), resource.DecimalSI) return resource.NewMilliQuantity(int64(val), resource.DecimalSI)
} }
// IsPodCPUUnbounded returns true if the cpu use is unbounded for any container in pod
func IsPodCPUUnbounded(pod *api.Pod) bool {
for j := range pod.Spec.Containers {
container := pod.Spec.Containers[j]
if container.Resources.Limits.Cpu().MilliValue() == int64(0) {
return true
}
}
return false
}
// IsPodMemoryUnbounded returns true if the memory use is unbounded for any container in pod
func IsPodMemoryUnbounded(pod *api.Pod) bool {
for j := range pod.Spec.Containers {
container := pod.Spec.Containers[j]
if container.Resources.Limits.Memory().Value() == int64(0) {
return true
}
}
return false
}
// PodMemory computes the memory usage of a pod // PodMemory computes the memory usage of a pod
func PodMemory(pod *api.Pod) *resource.Quantity { func PodMemory(pod *api.Pod) *resource.Quantity {
val := int64(0) val := int64(0)

View File

@@ -267,3 +267,63 @@ func TestSyncResourceQuotaNoChange(t *testing.T) {
t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions) t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions)
} }
} }
func TestIsPodCPUUnbounded(t *testing.T) {
pod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "0")}},
},
}
if IsPodCPUUnbounded(&pod) {
t.Errorf("Expected false")
}
pod = api.Pod{
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}},
},
}
if !IsPodCPUUnbounded(&pod) {
t.Errorf("Expected true")
}
pod.Spec.Containers[0].Resources = api.ResourceRequirements{}
if !IsPodCPUUnbounded(&pod) {
t.Errorf("Expected true")
}
}
func TestIsPodMemoryUnbounded(t *testing.T) {
pod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "1Gi")}},
},
}
if IsPodMemoryUnbounded(&pod) {
t.Errorf("Expected false")
}
pod = api.Pod{
ObjectMeta: api.ObjectMeta{Name: "pod-running"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}},
},
}
if !IsPodMemoryUnbounded(&pod) {
t.Errorf("Expected true")
}
pod.Spec.Containers[0].Resources = api.ResourceRequirements{}
if !IsPodMemoryUnbounded(&pod) {
t.Errorf("Expected true")
}
}

View File

@@ -36,6 +36,10 @@ type Scheme struct {
// Function to convert a field selector to internal representation.
type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error)
func (self *Scheme) Raw() *conversion.Scheme {
return self.raw
}
// fromScope gets the input version, desired output version, and desired Scheme
// from a conversion.Scope.
func (self *Scheme) fromScope(s conversion.Scope) (inVersion, outVersion string, scheme *Scheme) {

View File

@@ -85,7 +85,7 @@ func NewEndpointController(client *client.Client) *EndpointController {
e.podStore.Store, e.podController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
- return e.client.Pods(api.NamespaceAll).List(labels.Everything())
return e.client.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
},
WatchFunc: func(rv string) (watch.Interface, error) {
return e.client.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)

View File

@@ -39,7 +39,10 @@ func TestCanSupport(t *testing.T) {
}
}
- type fakeDiskManager struct{}
type fakeDiskManager struct {
attachCalled bool
detachCalled bool
}
func (fake *fakeDiskManager) MakeGlobalPDName(disk iscsiDisk) string {
return "/tmp/fake_iscsi_path"
@@ -50,6 +53,11 @@ func (fake *fakeDiskManager) AttachDisk(disk iscsiDisk) error {
if err != nil {
return err
}
// Simulate the global mount so that the fakeMounter returns the
// expected number of mounts for the attached disk.
disk.mounter.Mount(globalPath, globalPath, disk.fsType, 0, "")
fake.attachCalled = true
return nil
}
@@ -59,6 +67,7 @@ func (fake *fakeDiskManager) DetachDisk(disk iscsiDisk, mntPath string) error {
if err != nil {
return err
}
fake.detachCalled = true
return nil
}
@@ -81,7 +90,9 @@ func TestPlugin(t *testing.T) {
},
},
}
- builder, err := plug.(*ISCSIPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
fakeManager := &fakeDiskManager{}
fakeMounter := &mount.FakeMounter{}
builder, err := plug.(*ISCSIPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
@@ -111,8 +122,12 @@ func TestPlugin(t *testing.T) {
t.Errorf("SetUp() failed: %v", err) t.Errorf("SetUp() failed: %v", err)
} }
} }
if !fakeManager.attachCalled {
t.Errorf("Attach was not called")
}
- cleaner, err := plug.(*ISCSIPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
fakeManager = &fakeDiskManager{}
cleaner, err := plug.(*ISCSIPlugin).newCleanerInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err)
}
@@ -128,4 +143,7 @@ func TestPlugin(t *testing.T) {
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
if !fakeManager.detachCalled {
t.Errorf("Detach was not called")
}
}
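
The test change above relies on a spy-style fake: the fake disk manager records whether Attach/Detach were called so the test can assert on them afterwards. A hedged, self-contained sketch of that pattern, with a hypothetical `diskManager` interface rather than the plugin's real one:

```
// Standalone sketch of the test-double pattern used in the iscsi test above:
// a fake implementation records which calls were made so the test can verify
// behavior without touching real disks.
package main

import "fmt"

type diskManager interface {
	AttachDisk() error
	DetachDisk() error
}

type fakeDiskManager struct {
	attachCalled bool
	detachCalled bool
}

func (f *fakeDiskManager) AttachDisk() error { f.attachCalled = true; return nil }
func (f *fakeDiskManager) DetachDisk() error { f.detachCalled = true; return nil }

// setUpAndTearDown stands in for the code under test that is expected to
// attach on setup and detach on teardown.
func setUpAndTearDown(m diskManager) {
	_ = m.AttachDisk()
	_ = m.DetachDisk()
}

func main() {
	fake := &fakeDiskManager{}
	setUpAndTearDown(fake)
	fmt.Println(fake.attachCalled, fake.detachCalled) // true true
}
```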

View File

@@ -70,7 +70,7 @@ func NewSchedulerServer() *SchedulerServer {
func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on")
fs.Var(&s.Address, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
- fs.StringVar(&s.AlgorithmProvider, "algorithm_provider", s.AlgorithmProvider, "The scheduling algorithm provider to use")
fs.StringVar(&s.AlgorithmProvider, "algorithm_provider", s.AlgorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders())
fs.StringVar(&s.PolicyConfigFile, "policy_config_file", s.PolicyConfigFile, "File with scheduler policy configuration")
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")

View File

@@ -171,6 +171,9 @@ func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, cli
hardMem, hardMemFound := status.Hard[api.ResourceMemory]
if hardMemFound {
if set[api.ResourceMemory] && resourcequota.IsPodMemoryUnbounded(pod) {
return false, fmt.Errorf("Limited to %s memory, but pod has no specified memory limit", hardMem.String())
}
used, usedFound := status.Used[api.ResourceMemory]
if !usedFound {
return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
@@ -184,6 +187,9 @@ func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, cli
}
hardCPU, hardCPUFound := status.Hard[api.ResourceCPU]
if hardCPUFound {
if set[api.ResourceCPU] && resourcequota.IsPodCPUUnbounded(pod) {
return false, fmt.Errorf("Limited to %s CPU, but pod has no specified cpu limit", hardCPU.String())
}
used, usedFound := status.Used[api.ResourceCPU]
if !usedFound {
return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")

View File

@@ -195,6 +195,72 @@ func TestIncrementUsageCPU(t *testing.T) {
}
}
func TestUnboundedCPU(t *testing.T) {
namespace := "default"
client := testclient.NewSimpleFake(&api.PodList{
Items: []api.Pod{
{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
},
},
},
})
status := &api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
r := api.ResourceCPU
status.Hard[r] = resource.MustParse("200m")
status.Used[r] = resource.MustParse("100m")
newPod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0m", "1Gi")}},
}}
_, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, "pods", "CREATE"), status, client)
if err == nil {
t.Errorf("Expected CPU unbounded usage error")
}
}
func TestUnboundedMemory(t *testing.T) {
namespace := "default"
client := testclient.NewSimpleFake(&api.PodList{
Items: []api.Pod{
{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}},
},
},
},
})
status := &api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
r := api.ResourceMemory
status.Hard[r] = resource.MustParse("10Gi")
status.Used[r] = resource.MustParse("1Gi")
newPod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("250m", "0")}},
}}
_, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, "pods", "CREATE"), status, client)
if err == nil {
t.Errorf("Expected memory unbounded usage error")
}
}
func TestExceedUsageCPU(t *testing.T) {
namespace := "default"
client := testclient.NewSimpleFake(&api.PodList{

View File

@@ -60,6 +60,8 @@ func defaultPriorities() util.StringSet {
return util.NewStringSet(
// Prioritize nodes by least requested utilization.
factory.RegisterPriorityFunction("LeastRequestedPriority", algorithm.LeastRequestedPriority, 1),
// Prioritizes nodes to help achieve balanced resource usage
factory.RegisterPriorityFunction("BalancedResourceAllocation", algorithm.BalancedResourceAllocation, 1),
// spreads pods by minimizing the number of pods (belonging to the same service) on the same minion.
factory.RegisterPriorityConfigFactory(
"ServiceSpreadingPriority",

View File

@@ -19,6 +19,7 @@ package factory
import (
"fmt"
"regexp"
"strings"
"sync" "sync"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler" algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
@@ -300,3 +301,12 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {
}
}
}
// ListAlgorithmProviders is called when listing all available algorithm providers in `kube-scheduler --help`
func ListAlgorithmProviders() string {
var availableAlgorithmProviders []string
for name := range algorithmProviderMap {
availableAlgorithmProviders = append(availableAlgorithmProviders, name)
}
return strings.Join(availableAlgorithmProviders, " | ")
}
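
`ListAlgorithmProviders` simply joins the registered provider names so they can be embedded in the `--algorithm_provider` flag's help text (see the scheduler server change above). A small self-contained sketch of the same idea, with placeholder provider names rather than the real registry contents:

```
// Standalone sketch: join the names of registered algorithm providers into a
// string suitable for a flag's help text.
package main

import (
	"fmt"
	"strings"
)

func listProviders(registry map[string]struct{}) string {
	var names []string
	for name := range registry {
		names = append(names, name)
	}
	return strings.Join(names, " | ")
}

func main() {
	registry := map[string]struct{}{
		"DefaultProvider":   {},
		"SomeOtherProvider": {},
	}
	fmt.Println("The scheduling algorithm provider to use, one of: " + listProviders(registry))
}
```

Note that Go map iteration order is not deterministic, so the listed order can vary between runs.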

View File

@@ -35,14 +35,14 @@ import (
)
// Convenient wrapper around listing pods supporting retries.
- func listPods(c *client.Client, namespace string, label labels.Selector) (*api.PodList, error) {
func listPods(c *client.Client, namespace string, label labels.Selector, field fields.Selector) (*api.PodList, error) {
maxRetries := 4
- pods, err := c.Pods(namespace).List(label)
pods, err := c.Pods(namespace).List(label, field)
for i := 0; i < maxRetries; i++ {
if err == nil {
return pods, nil
}
- pods, err = c.Pods(namespace).List(label)
pods, err = c.Pods(namespace).List(label, field)
}
return pods, err
}
@@ -127,7 +127,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) {
By(fmt.Sprintf("Making sure all %d replicas exist", replicas)) By(fmt.Sprintf("Making sure all %d replicas exist", replicas))
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := listPods(c, ns, label) pods, err := listPods(c, ns, label, fields.Everything())
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
current = len(pods.Items) current = len(pods.Items)
failCount := 5 failCount := 5
@@ -147,7 +147,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) {
last = current
time.Sleep(5 * time.Second)
- pods, err = listPods(c, ns, label)
pods, err = listPods(c, ns, label, fields.Everything())
Expect(err).NotTo(HaveOccurred())
current = len(pods.Items)
}
@@ -166,7 +166,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) {
unknown := 0
time.Sleep(10 * time.Second)
- currentPods, listErr := listPods(c, ns, label)
currentPods, listErr := listPods(c, ns, label, fields.Everything())
Expect(listErr).NotTo(HaveOccurred())
if len(currentPods.Items) != len(pods.Items) {
Failf("Number of reported pods changed: %d vs %d", len(currentPods.Items), len(pods.Items))

View File

@@ -83,7 +83,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
// Wait for the Elasticsearch pods to enter the running state.
By("Checking to make sure the Elasticsearch pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "elasticsearch-logging"}))
- pods, err := c.Pods(api.NamespaceDefault).List(label)
pods, err := c.Pods(api.NamespaceDefault).List(label, fields.Everything())
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
err = waitForPodRunning(c, pod.Name)

View File

@@ -78,7 +78,7 @@ var _ = Describe("Events", func() {
expectNoError(waitForPodRunning(c, pod.Name))
By("verifying the pod is in kubernetes")
- pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
Expect(len(pods.Items)).To(Equal(1))
By("retrieving the pod")

View File

@@ -86,7 +86,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
return nil, fmt.Errorf("expected to find only one replica for rc %q, found %d", rc.Name, rc.Status.Replicas) return nil, fmt.Errorf("expected to find only one replica for rc %q, found %d", rc.Name, rc.Status.Replicas)
} }
expectedRcs[rc.Name] = true expectedRcs[rc.Name] = true
podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector()) podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -85,7 +85,7 @@ var _ = Describe("Networking", func() {
//Assert basic external connectivity.
//Since this is not really a test of kubernetes in any way, we
//leave it as a pre-test assertion, rather than a Ginko test.
- By("Executing a successfull http request from the external internet")
By("Executing a successful http request from the external internet")
resp, err := http.Get("http://google.com")
if err != nil {
Failf("Unable to connect/talk to the internet: %v", err)

View File

@@ -174,7 +174,7 @@ var _ = Describe("Pods", func() {
}
By("setting up watch")
- pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
if err != nil {
Fail(fmt.Sprintf("Failed to query for pods: %v", err))
}
@@ -196,7 +196,7 @@ var _ = Describe("Pods", func() {
}
By("verifying the pod is in kubernetes")
- pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
if err != nil {
Fail(fmt.Sprintf("Failed to query for pods: %v", err))
}
@@ -214,7 +214,7 @@ var _ = Describe("Pods", func() {
By("deleting the pod") By("deleting the pod")
podClient.Delete(pod.Name) podClient.Delete(pod.Name)
pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))) pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
if err != nil { if err != nil {
Fail(fmt.Sprintf("Failed to delete pod: %v", err)) Fail(fmt.Sprintf("Failed to delete pod: %v", err))
} }
@@ -286,7 +286,7 @@ var _ = Describe("Pods", func() {
expectNoError(waitForPodRunning(c, pod.Name))
By("verifying the pod is in kubernetes")
- pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
Expect(len(pods.Items)).To(Equal(1))
By("retrieving the pod")
@@ -309,7 +309,7 @@ var _ = Describe("Pods", func() {
expectNoError(waitForPodRunning(c, pod.Name))
By("verifying the updated pod is in kubernetes")
- pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
Expect(len(pods.Items)).To(Equal(1))
fmt.Println("pod update OK")
})

View File

@@ -22,6 +22,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
@@ -110,7 +111,7 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
// List the pods, making sure we observe all the replicas.
listTimeout := time.Minute
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
- pods, err := c.Pods(ns).List(label)
pods, err := c.Pods(ns).List(label, fields.Everything())
Expect(err).NotTo(HaveOccurred())
t := time.Now()
for {
@@ -123,7 +124,7 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
name, replicas, len(pods.Items), time.Since(t).Seconds())
}
time.Sleep(5 * time.Second)
- pods, err = c.Pods(ns).List(label)
pods, err = c.Pods(ns).List(label, fields.Everything())
Expect(err).NotTo(HaveOccurred())
}
@@ -165,7 +166,7 @@ type responseChecker struct {
func (r responseChecker) checkAllResponses() (done bool, err error) {
successes := 0
- currentPods, err := r.c.Pods(r.ns).List(r.label)
currentPods, err := r.c.Pods(r.ns).List(r.label, fields.Everything())
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.

View File

@@ -257,7 +257,7 @@ var _ = Describe("Services", func() {
It("should be able to create a functioning external load balancer", func() { It("should be able to create a functioning external load balancer", func() {
serviceName := "external-lb-test" serviceName := "external-lb-test"
ns := api.NamespaceDefault ns := namespace0
labels := map[string]string{ labels := map[string]string{
"key0": "value0", "key0": "value0",
} }
@@ -275,9 +275,6 @@ var _ = Describe("Services", func() {
},
}
- By("cleaning up previous service " + serviceName + " from namespace " + ns)
- c.Services(ns).Delete(serviceName)
By("creating service " + serviceName + " with external load balancer in namespace " + ns)
result, err := c.Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
@@ -317,7 +314,7 @@ var _ = Describe("Services", func() {
}
By("creating pod to be part of service " + serviceName)
- podClient := c.Pods(api.NamespaceDefault)
podClient := c.Pods(ns)
defer func() {
By("deleting pod " + pod.Name)
defer GinkgoRecover()
@@ -326,7 +323,7 @@ var _ = Describe("Services", func() {
if _, err := podClient.Create(pod); err != nil {
Failf("Failed to create pod %s: %v", pod.Name, err)
}
- expectNoError(waitForPodRunning(c, pod.Name))
expectNoError(waitForPodRunningInNamespace(c, pod.Name, ns))
By("hitting the pod through the service's external load balancer")
var resp *http.Response

View File

@@ -56,7 +56,7 @@ func TestClient(t *testing.T) {
t.Errorf("expected %#v, got %#v", e, a) t.Errorf("expected %#v, got %#v", e, a)
} }
pods, err := client.Pods(ns).List(labels.Everything()) pods, err := client.Pods(ns).List(labels.Everything(), fields.Everything())
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
@@ -94,7 +94,7 @@ func TestClient(t *testing.T) {
}
// pod is shown, but not scheduled
- pods, err = client.Pods(ns).List(labels.Everything())
pods, err = client.Pods(ns).List(labels.Everything(), fields.Everything())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}

View File

@@ -28,6 +28,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
@@ -111,7 +112,7 @@ func TestApiserverMetrics(t *testing.T) {
// Make a request to the apiserver to ensure there's at least one data point
// for the metrics we're expecting -- otherwise, they won't be exported.
client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})
- if _, err := client.Pods(api.NamespaceDefault).List(labels.Everything()); err != nil {
if _, err := client.Pods(api.NamespaceDefault).List(labels.Everything(), fields.Everything()); err != nil {
t.Fatalf("unexpected error getting pods: %v", err)
}

View File

@@ -39,7 +39,7 @@ func init() {
func deletePodOrErrorf(t *testing.T, c *client.Client, ns, name string) {
if err := c.Pods(ns).Delete(name); err != nil {
- t.Errorf("unable to delete pods %v: %v", name, err)
t.Errorf("unable to delete pod %v: %v", name, err)
}
}
func deleteSecretOrErrorf(t *testing.T, c *client.Client, ns, name string) {
@@ -136,7 +136,7 @@ func DoTestSecrets(t *testing.T, client *client.Client, apiVersion string) {
defer deletePodOrErrorf(t, client, ns, pod.Name)
// Create a pod that consumes non-existent secret.
- pod.ObjectMeta.Name = "uses-non-existant-secret"
pod.ObjectMeta.Name = "uses-non-existent-secret"
if _, err := client.Pods(ns).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}

View File

@@ -30,7 +30,7 @@ Bower components should be referenced in one of the `vendor.json` files below:
### Serving the app during development
- The app can be served through `kubectl`, but for some types of review a local web server is convenient. One can be installed as follows:
For development you can serve the files locally by installing a webserver as follows:
```
sudo npm install -g http-server
@@ -43,6 +43,9 @@ cd app
http-server -a localhost -p 8000
```
### Serving the app in production
https://<kubernetes-master>/static/app/
### Configuration
#### Configuration settings
A json file can be used by `gulp` to automatically create angular constants. This is useful for setting per environment variables such as api endpoints.
@@ -57,7 +60,6 @@ www/master
├── shared/config/development.json
└── components
├── dashboard/config/development.json
- ├── graph/config/development.json
└── my_component/config/development.json
```
produces ```www/master/shared/config/generated-config.js```:
@@ -66,14 +68,16 @@ angular.module('kubernetesApp.config', [])
.constant('ENV', {
'/': <www/master/shared/config/development.json>,
'dashboard': <www/master/components/dashboard/config/development.json>,
- 'graph': <www/master/components/graph/config/development.json>,
'my_component': <www/master/components/my_component/config/development.json>
});
```
#### Kubernetes server configuration
- **RECOMMENDED**: By default the Kubernetes api server does not support CORS,
You'll need to run ```hack/build-ui.sh``` to create a new ```pkg/ui/datafile.go``` file.
This is the file that is built-in to the kube-apiserver.
**RECOMMENDED**: When working in development mode the Kubernetes api server does not support CORS,
so the `kube-apiserver.service` must be started with
`--cors_allowed_origins=.*` or `--cors_allowed_origins=http://<your
host here>`
@@ -87,7 +91,7 @@ angular.module('kubernetesApp.config', [])
See [master/components/README.md](master/components/README.md).
### Testing
- Currently kuberntes-ui includes both unit-testing (run via [Karma](http://karma-runner.github.io/0.12/index.html)) and
Currently kubernetes/www includes both unit-testing (run via [Karma](http://karma-runner.github.io/0.12/index.html)) and
end-to-end testing (run via
[Protractor](http://angular.github.io/protractor/#/)).