Merge remote-tracking branch 'upstream/master'

Vincenzo D'Amore
2015-09-14 15:36:35 +02:00
296 changed files with 7389 additions and 2082 deletions

View File

@@ -13171,7 +13171,11 @@
       "properties": {
         "reason": {
           "type": "string",
-          "description": "(brief) reason the container is not yet running, such as pulling its image."
+          "description": "(brief) reason the container is not yet running."
+        },
+        "message": {
+          "type": "string",
+          "description": "Message regarding why the container is not yet running."
         }
       }
     },

@@ -13662,6 +13666,10 @@
       "sessionAffinity": {
         "type": "string",
         "description": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies"
+      },
+      "loadBalancerIP": {
+        "type": "string",
+        "description": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature."
       }
     }
   },
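The two API additions above surface directly in day-to-day use; a minimal bash sketch (the controller/pod names and the IP are hypothetical, and the cloud provider must support loadBalancerIP, per the description above):

# Request a specific IP for a LoadBalancer service (flag added to kubectl expose in this commit):
kubectl expose rc my-frontend --port=80 --type=LoadBalancer --load-balancer-ip=104.155.10.20

# Inspect the new longer 'message' next to the brief 'reason' while a container is not yet running:
kubectl get pod my-pod -o yaml | grep -A 3 'waiting:'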

View File

@@ -835,6 +835,8 @@ function kube::release::package_full_tarball() {
   cp "${KUBE_ROOT}/README.md" "${release_stage}/"
   cp "${KUBE_ROOT}/LICENSE" "${release_stage}/"
   cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/"
+  mkdir -p "${release_stage}/contrib/completions/bash"
+  cp "${KUBE_ROOT}/contrib/completions/bash/kubectl" "${release_stage}/contrib/completions/bash"

   kube::release::clean_cruft
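Once the full tarball is unpacked, the newly bundled completion file can be sourced directly; a small usage sketch (archive name assumed from the release layout):

tar xzf kubernetes.tar.gz
source kubernetes/contrib/completions/bash/kubectl  # enables kubectl tab completion in this shell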

View File

@@ -43,17 +43,12 @@ For a regular service, this resolves to the port number and the CNAME:
 `my-svc.my-namespace.svc.cluster.local`.
 For a headless service, this resolves to multiple answers, one for each pod
 that is backing the service, and contains the port number and a CNAME of the pod
-with the format `auto-generated-name.my-svc.my-namespace.svc.cluster.local`
+of the form `auto-generated-name.my-svc.my-namespace.svc.cluster.local`.
+SRV records always contain the 'svc' segment in them and are not supported for
+old-style CNAMEs where the 'svc' segment was omitted.

 ### Backwards compatibility

 Previous versions of kube-dns made names of the form
-`my-svc.my-namespace.cluster.local` (the 'svc' level was added later). For
-compatibility, kube-dns supports both names for the time being. Users should
-avoid creating a namespace named 'svc', to avoid conflicts. The old name
-format is deprecated and will be removed in a future release.
+`my-svc.my-namespace.cluster.local` (the 'svc' level was added later). This
+is no longer supported.

@@ -178,6 +173,11 @@ paths to the node's own DNS settings. If the node is able to resolve DNS names
 specific to the larger environment, pods should be able to, also. See "Known
 issues" below for a caveat.

+If you don't want this, or if you want a different DNS config for pods, you can
+use the kubelet's `--resolv-conf` flag. Setting it to "" means that pods will
+not inherit DNS. Setting it to a valid file path means that kubelet will use
+this file instead of `/etc/resolv.conf` for DNS inheritance.
+
 ## Known issues

 Kubernetes installs do not configure the nodes' resolv.conf files to use the
 cluster DNS by default, because that process is inherently distro-specific.

@@ -190,7 +190,7 @@ consume 1 `nameserver` record and 3 `search` records. This means that if a
 local installation already uses 3 `nameserver`s or uses more than 3 `search`es,
 some of those settings will be lost. As a partial workaround, the node can run
 `dnsmasq` which will provide more `nameserver` entries, but not more `search`
-entries.
+entries. You can also use kubelet's `--resolv-conf` flag.

 ## Making changes

 Please observe the release process for making changes to the `kube2sky`
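A quick way to exercise the names and flags described above; a sketch assuming a headless service my-svc in namespace my-namespace with the default cluster.local suffix:

# From inside a pod: one answer per backing pod, plus SRV records (note the mandatory 'svc' segment):
nslookup my-svc.my-namespace.svc.cluster.local
host -t SRV my-svc.my-namespace.svc.cluster.local

# On a node: opt pods out of DNS inheritance, or point them at a custom file:
kubelet --resolv-conf=""                           # pods will not inherit DNS
kubelet --resolv-conf=/etc/kubernetes/resolv.conf  # inherit from this file instead of /etc/resolv.conf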

View File

@@ -35,8 +35,8 @@ import (
 	"github.com/golang/glog"
 	skymsg "github.com/skynetservices/skydns/msg"
 	kapi "k8s.io/kubernetes/pkg/api"
+	kcache "k8s.io/kubernetes/pkg/client/cache"
 	kclient "k8s.io/kubernetes/pkg/client/unversioned"
-	kcache "k8s.io/kubernetes/pkg/client/unversioned/cache"
 	kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	kframework "k8s.io/kubernetes/pkg/controller/framework"
 	kSelector "k8s.io/kubernetes/pkg/fields"

View File

@@ -29,7 +29,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	kapi "k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/cache"
+	"k8s.io/kubernetes/pkg/client/cache"
 )

 type fakeEtcdClient struct {

View File

@@ -118,45 +118,51 @@ else
     # This is the best option, but it is sadly broken on most distros
     # Bug: https://github.com/docker/docker/issues/4036

-    # 95% goes to the docker thin-pool
-    lvcreate -l 95%VG --thinpool docker-thinpool vg-ephemeral
+    # 80% goes to the docker thin-pool; we want to leave some space for host-volumes
+    lvcreate -l 80%VG --thinpool docker-thinpool vg-ephemeral
     DOCKER_OPTS="${DOCKER_OPTS} --storage-opt dm.thinpooldev=/dev/mapper/vg--ephemeral-docker--thinpool"
     # Note that we don't move docker; docker goes direct to the thinpool
+
+    # Remaining space (20%) is for kubernetes data
+    # TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data?
+    lvcreate -l 100%FREE -n kubernetes vg-ephemeral
+    mkfs -t ext4 /dev/vg-ephemeral/kubernetes
+    mkdir -p /mnt/ephemeral/kubernetes
+    echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime 0 0" >> /etc/fstab
+    mount /mnt/ephemeral/kubernetes
+    move_kubelet="/mnt/ephemeral/kubernetes"
   else
     # aufs
-    # Create a docker lv, use docker on it
-    # 95% goes to the docker thin-pool
+    # We used to split docker & kubernetes, but we no longer do that, because
+    # host volumes go into the kubernetes area, and it is otherwise very easy
+    # to fill up small volumes.
     release=`lsb_release -c -s`
     if [[ "${release}" != "wheezy" ]] ; then
-      lvcreate -l 95%VG --thinpool docker-thinpool vg-ephemeral
-      THINPOOL_SIZE=$(lvs vg-ephemeral/docker-thinpool -o LV_SIZE --noheadings --units M --nosuffix)
-      lvcreate -V${THINPOOL_SIZE}M -T vg-ephemeral/docker-thinpool -n docker
+      lvcreate -l 100%FREE --thinpool pool-ephemeral vg-ephemeral
+      THINPOOL_SIZE=$(lvs vg-ephemeral/pool-ephemeral -o LV_SIZE --noheadings --units M --nosuffix)
+      lvcreate -V${THINPOOL_SIZE}M -T vg-ephemeral/pool-ephemeral -n ephemeral
     else
       # Thin provisioning not supported by Wheezy
       echo "Detected wheezy; won't use LVM thin provisioning"
-      lvcreate -l 95%VG -n docker vg-ephemeral
+      lvcreate -l 100%VG -n ephemeral vg-ephemeral
     fi
-    mkfs -t ext4 /dev/vg-ephemeral/docker
-    mkdir -p /mnt/ephemeral/docker
-    echo "/dev/vg-ephemeral/docker /mnt/ephemeral/docker ext4 noatime 0 0" >> /etc/fstab
-    mount /mnt/ephemeral/docker
+    mkfs -t ext4 /dev/vg-ephemeral/ephemeral
+    mkdir -p /mnt/ephemeral
+    echo "/dev/vg-ephemeral/ephemeral /mnt/ephemeral ext4 noatime 0 0" >> /etc/fstab
+    mount /mnt/ephemeral
+    mkdir -p /mnt/ephemeral/kubernetes
     move_docker="/mnt/ephemeral"
+    move_kubelet="/mnt/ephemeral/kubernetes"
   fi
-  # Remaining 5% is for kubernetes data
-  # TODO: Should this be a thin pool? e.g. would we ever want to snapshot this data?
-  lvcreate -l 100%FREE -n kubernetes vg-ephemeral
-  mkfs -t ext4 /dev/vg-ephemeral/kubernetes
-  mkdir -p /mnt/ephemeral/kubernetes
-  echo "/dev/vg-ephemeral/kubernetes /mnt/ephemeral/kubernetes ext4 noatime 0 0" >> /etc/fstab
-  mount /mnt/ephemeral/kubernetes
-  move_kubelet="/mnt/ephemeral/kubernetes"
 else
   echo "Ignoring unknown DOCKER_STORAGE: ${docker_storage}"
 fi

View File

@@ -44,6 +44,8 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
 MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
+RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
+ENABLE_EXPERIMENTAL_API="${KUBE_ENABLE_EXPERIMENTAL_API:-false}"
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3

@@ -87,7 +89,6 @@ CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
 ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"

 # Optional: Create autoscaler for cluster's nodes.
-# NOT WORKING YET!
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"

@@ -95,6 +96,13 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi

+# Optional: Enable feature for autoscaling number of pods
+# Experimental feature, not ready for production use.
+ENABLE_HORIZONTAL_POD_AUTOSCALER="${KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER:-false}"
+if [[ "${ENABLE_HORIZONTAL_POD_AUTOSCALER}" == "true" ]]; then
+  ENABLE_EXPERIMENTAL_API=true
+fi
+
 # Admission Controllers to invoke prior to persisting objects in cluster
 ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
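All of these knobs are driven from the environment before kube-up runs; a minimal sketch (values illustrative):

export KUBE_ENABLE_EXPERIMENTAL_API=true           # turn the experimental API group on explicitly...
export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=true  # ...or implicitly, which forces ENABLE_EXPERIMENTAL_API=true
cluster/kube-up.sh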

View File

@@ -45,6 +45,9 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
+RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
+ENABLE_EXPERIMENTAL_API="${KUBE_ENABLE_EXPERIMENTAL_API:-false}"
+
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"  # formerly PORTAL_NET

@@ -59,7 +62,10 @@ TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
 KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL"
 APISERVER_TEST_ARGS="--runtime-config=experimental/v1 ${TEST_CLUSTER_LOG_LEVEL}"
-CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
+# pod-eviction-timeout is currently 2 * node-monitor-grace-period to allow for some network
+# problems, but don't ensure that the Kubelet can be restarted without evicting Pods. We don't
+# think it's necessary for tests.
+CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL} --pod-eviction-timeout=1m20s"
 SCHEDULER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
 KUBEPROXY_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"

@@ -92,7 +98,6 @@ CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
 ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"

 # Optional: Create autoscaler for cluster's nodes.
-# NOT WORKING YET!
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"

@@ -100,6 +105,13 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi

+# Optional: Enable feature for autoscaling number of pods
+# Experimental feature, not ready for production use.
+ENABLE_HORIZONTAL_POD_AUTOSCALER="${KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER:-false}"
+if [[ "${ENABLE_HORIZONTAL_POD_AUTOSCALER}" == "true" ]]; then
+  ENABLE_EXPERIMENTAL_API=true
+fi
+
 ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota

 # Optional: if set to true kube-up will automatically check for existing resources and clean them up.

View File

@@ -310,6 +310,11 @@ EOF
 cluster_registry_disk_type: gce
 cluster_registry_disk_size: $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE})
 cluster_registry_disk_name: ${CLUSTER_REGISTRY_DISK}
+EOF
+  fi
+  if [ -n "${ENABLE_HORIZONTAL_POD_AUTOSCALER:-}" ]; then
+    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
+enable_horizontal_pod_autoscaler: '$(echo "$ENABLE_HORIZONTAL_POD_AUTOSCALER" | sed -e "s/'/''/g")'
 EOF
   fi
 }

@@ -568,6 +573,11 @@ EOF
   # CIDR range.
   cat <<EOF >>/etc/salt/minion.d/grains.conf
cbr-cidr: ${MASTER_IP_RANGE}
+EOF
+  fi
+  if [[ ! -z "${RUNTIME_CONFIG:-}" ]]; then
+    cat <<EOF >>/etc/salt/minion.d/grains.conf
+runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
 EOF
   fi
 }

View File

@@ -54,6 +54,8 @@ KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
 KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
 ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
 MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
+ENABLE_HORIZONTAL_POD_AUTOSCALER: $(yaml-quote ${ENABLE_HORIZONTAL_POD_AUTOSCALER})
+RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
 KUBERNETES_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
 KUBERNETES_CONTAINER_RUNTIME: $(yaml-quote ${CONTAINER_RUNTIME})
 RKT_VERSION: $(yaml-quote ${RKT_VERSION})

View File

@@ -51,6 +51,8 @@ KUBELET_TOKEN: $(yaml-quote ${KUBELET_TOKEN:-})
 KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
 ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
 MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
+ENABLE_HORIZONTAL_POD_AUTOSCALER: $(yaml-quote ${ENABLE_HORIZONTAL_POD_AUTOSCALER})
+RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
 CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
 KUBELET_CERT: $(yaml-quote ${KUBELET_CERT_BASE64:-})
 KUBELET_KEY: $(yaml-quote ${KUBELET_KEY_BASE64:-})

View File

@@ -53,6 +53,18 @@ function join_csv {
 # Verify prereqs
 function verify-prereqs {
+  if [[ "${ENABLE_EXPERIMENTAL_API}" == "true" ]]; then
+    if [[ -z "${RUNTIME_CONFIG}" ]]; then
+      RUNTIME_CONFIG="experimental/v1=true"
+    else
+      # TODO: add checking if RUNTIME_CONFIG contains "experimental/v1=false" and appending "experimental/v1=true" if not.
+      if echo "${RUNTIME_CONFIG}" | grep -q -v "experimental/v1=true"; then
+        echo "Experimental API should be turned on, but is not turned on in RUNTIME_CONFIG!"
+        exit 1
+      fi
+    fi
+  fi
   local cmd
   for cmd in gcloud gsutil; do
     if ! which "${cmd}" >/dev/null; then

@@ -465,6 +477,7 @@ function write-master-env {
   if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
     KUBELET_APISERVER="${MASTER_NAME}"
   fi
+
   build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
 }
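The new verify-prereqs logic means ENABLE_EXPERIMENTAL_API and RUNTIME_CONFIG must agree; a sketch of both outcomes (values illustrative):

# Consistent: experimental/v1 is enabled in RUNTIME_CONFIG, kube-up proceeds.
KUBE_ENABLE_EXPERIMENTAL_API=true KUBE_RUNTIME_CONFIG="experimental/v1=true" cluster/kube-up.sh

# Inconsistent: verify-prereqs prints the error above and exits 1.
KUBE_ENABLE_EXPERIMENTAL_API=true KUBE_RUNTIME_CONFIG="api/v1=true" cluster/kube-up.sh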

View File

@@ -25,7 +25,7 @@ NETWORK="${NETWORK:-default}"
 NETWORK_RANGE="${NETWORK_RANGE:-10.240.0.0/16}"
 FIREWALL_SSH="${FIREWALL_SSH:-${NETWORK}-allow-ssh}"
 GCLOUD="${GCLOUD:-gcloud}"
-CMD_GROUP="${CMD_GROUP:-beta}"
+CMD_GROUP="${CMD_GROUP:-}"
 GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}"
 MINION_SCOPES="${MINION_SCOPES:-"compute-rw,storage-ro"}"
 MACHINE_TYPE="${MACHINE_TYPE:-n1-standard-1}"

View File

@@ -99,7 +99,7 @@ function verify-prereqs() {
     sudo_prefix="sudo"
   fi
   ${sudo_prefix} gcloud ${gcloud_prompt:-} components update preview || true
-  ${sudo_prefix} gcloud ${gcloud_prompt:-} components update "${CMD_GROUP}"|| true
+  ${sudo_prefix} gcloud ${gcloud_prompt:-} components update ${CMD_GROUP:-} || true
   ${sudo_prefix} gcloud ${gcloud_prompt:-} components update kubectl|| true
   ${sudo_prefix} gcloud ${gcloud_prompt:-} components update || true
 }

@@ -150,7 +150,7 @@ function kube-up() {
   )

   # Bring up the cluster.
-  "${GCLOUD}" "${CMD_GROUP}" container clusters create "${CLUSTER_NAME}" "${create_args[@]}"
+  "${GCLOUD}" ${CMD_GROUP:-} container clusters create "${CLUSTER_NAME}" "${create_args[@]}"
 }

 # Execute prior to running tests to initialize required structure. This is

@@ -200,7 +200,7 @@ function test-setup() {
 function detect-master() {
   echo "... in gke:detect-master()" >&2
   detect-project >&2
-  KUBE_MASTER_IP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
+  KUBE_MASTER_IP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
     --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
     | grep endpoint | cut -f 2 -d ' ')
 }

@@ -242,7 +242,7 @@ function detect-minion-names {
 # NODE_INSTANCE_GROUP
 function detect-node-instance-group {
   echo "... in gke:detect-node-instance-group()" >&2
-  NODE_INSTANCE_GROUP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
+  NODE_INSTANCE_GROUP=$("${GCLOUD}" ${CMD_GROUP:-} container clusters describe \
     --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
     | grep instanceGroupManagers | cut -d '/' -f 11)
 }

@@ -318,6 +318,6 @@ function test-teardown() {
 function kube-down() {
   echo "... in gke:kube-down()" >&2
   detect-project >&2
-  "${GCLOUD}" "${CMD_GROUP}" container clusters delete --project="${PROJECT}" \
+  "${GCLOUD}" ${CMD_GROUP:-} container clusters delete --project="${PROJECT}" \
     --zone="${ZONE}" "${CLUSTER_NAME}" --quiet
 }
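With CMD_GROUP now defaulting to empty, the GA gcloud container surface is used; the old command group can still be pinned per invocation; a sketch:

KUBERNETES_PROVIDER=gke cluster/kube-up.sh                 # default: plain `gcloud container ...`
CMD_GROUP=beta KUBERNETES_PROVIDER=gke cluster/kube-up.sh  # restore the previous `gcloud beta container ...`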

View File

@@ -84,10 +84,10 @@ net.ipv4.ip_forward:
 #
 # To change:
 #
-# 1. Find new deb name with:
-#    curl https://get.docker.com/ubuntu/dists/docker/main/binary-amd64/Packages
+# 1. Find new deb name at:
+#    http://apt.dockerproject.org/repo/pool/main/d/docker-engine
 # 2. Download based on that:
-#    curl -O https://get.docker.com/ubuntu/pool/main/<...>
+#    curl -O http://apt.dockerproject.org/repo/pool/main/d/docker-engine/<deb>
 # 3. Upload to GCS:
 #    gsutil cp <deb> gs://kubernetes-release/docker/<deb>
 # 4. Make it world readable:

@@ -99,16 +99,22 @@
 {% set storage_base='https://storage.googleapis.com/kubernetes-release/docker/' %}

+# Only upgrade Docker to 1.8.2 for the containerVM image.
+# TODO(dchen1107): For release 1.1, we want to update the ContainerVM image to
+# include Docker 1.8.2 and comment out the upgrade below.
+{% if grains.get('cloud', '') == 'gce'
+   and grains.get('os_family', '') == 'Debian'
+   and grains.get('oscodename', '') == 'wheezy' -%}
+{% set docker_pkg_name='docker-engine' %}
+{% set override_deb='docker-engine_1.8.2-0~wheezy_amd64.deb' %}
+{% set override_deb_sha1='dcff80bffcbde458508da58d2a9fe7bef8eed404' %}
+{% set override_docker_ver='1.8.2-0~wheezy' %}
+{% else %}
+{% set docker_pkg_name='lxc-docker-1.7.1' %}
+{% set override_docker_ver='1.7.1' %}
 {% set override_deb='lxc-docker-1.7.1_1.7.1_amd64.deb' %}
 {% set override_deb_sha1='81abef31dd2c616883a61f85bfb294d743b1c889' %}
-{% set override_docker_ver='1.7.1' %}
-
-# Comment out below logic for master branch, so that we can upgrade GCE cluster
-# to docker 1.7.1 by default.
-#
-# TODO(dchen1107): For release 1.1, we want to fall back to
-# ContainerVM installed docker by set override_deb, override_deb_sha1 and
-# override_docker_ver back to '' for gce cloud provider.
+{% endif %}

 {% if override_docker_ver != '' %}
 purge-old-docker-package:

@@ -135,10 +141,10 @@ purge-old-docker-package:
     - mode: 644
     - makedirs: true

-lxc-docker-{{ override_docker_ver }}:
+docker-upgrade:
   pkg.installed:
     - sources:
-      - lxc-docker-{{ override_docker_ver }}: /var/cache/docker-install/{{ override_deb }}
+      - {{ docker_pkg_name }}: /var/cache/docker-install/{{ override_deb }}
     - require:
       - file: /var/cache/docker-install/{{ override_deb }}
 {% endif %} # end override_docker_ver != ''

@@ -168,7 +174,7 @@ fix-service-docker:
       - file: {{ environment_file }}
 {% if override_docker_ver != '' %}
     - require:
-      - pkg: lxc-docker-{{ override_docker_ver }}
+      - pkg: {{ docker_pkg_name }}-{{ override_docker_ver }}
 {% endif %}
 {% endif %}

@@ -187,13 +193,13 @@ docker:
     - watch:
       - file: {{ environment_file }}
 {% if override_docker_ver != '' %}
-      - pkg: lxc-docker-{{ override_docker_ver }}
+      - pkg: docker-upgrade
 {% endif %}
 {% if pillar.get('is_systemd') %}
       - file: {{ pillar.get('systemd_system_path') }}/docker.service
 {% endif %}
 {% if override_docker_ver != '' %}
     - require:
-      - pkg: lxc-docker-{{ override_docker_ver }}
+      - pkg: docker-upgrade
 {% endif %}
 {% endif %} # end grains.os_family != 'RedHat'
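The four numbered steps from the comment above, written out; a sketch using the wheezy deb pinned in this change (the gsutil acl invocation for step 4 is an assumption, since the original only says "make it world readable"):

DEB=docker-engine_1.8.2-0~wheezy_amd64.deb
curl -O http://apt.dockerproject.org/repo/pool/main/d/docker-engine/${DEB}
sha1sum ${DEB}  # expect dcff80bffcbde458508da58d2a9fe7bef8eed404, matching the override above
gsutil cp ${DEB} gs://kubernetes-release/docker/${DEB}
gsutil acl ch -u AllUsers:R gs://kubernetes-release/docker/${DEB}  # assumed spelling of step 4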

View File

@@ -1,6 +1,7 @@
 {% set cluster_name = "" -%}
 {% set cluster_cidr = "" -%}
 {% set allocate_node_cidrs = "" -%}
+{% set enable_horizontal_pod_autoscaler = "" -%}

 {% if pillar['instance_prefix'] is defined -%}
   {% set cluster_name = "--cluster-name=" + pillar['instance_prefix'] -%}

@@ -11,6 +12,9 @@
 {% if pillar['allocate_node_cidrs'] is defined -%}
   {% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
 {% endif -%}
+{% if pillar['enable_horizontal_pod_autoscaler'] is defined -%}
+  {% set enable_horizontal_pod_autoscaler = "--enable-horizontal-pod-autoscaler=" + pillar['enable_horizontal_pod_autoscaler'] -%}
+{% endif -%}

 {% set cloud_provider = "" -%}
 {% set cloud_config = "" -%}

@@ -34,7 +38,7 @@
 {% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
 {% endif -%}

-{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
+{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + enable_horizontal_pod_autoscaler + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}

 # test_args has to be kept at the end, so they'll overwrite any prior configuration
 {% if pillar['controller_manager_test_args'] is defined -%}

View File

@@ -32,7 +32,6 @@ mkdir -p binaries/minion
 # flannel
 echo "Download flannel release ..."
 FLANNEL_VERSION=${FLANNEL_VERSION:-"0.4.0"}
-echo "Flannel version is $FLANNEL_VERSION"
 if [ ! -f flannel.tar.gz ] ; then
   curl -L https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz -o flannel.tar.gz
   tar xzf flannel.tar.gz

@@ -54,10 +53,10 @@ cp $ETCD/etcd $ETCD/etcdctl binaries/master
 # k8s
 echo "Download kubernetes release ..."
-K8S_VERSION=${K8S_VERSION:-"1.0.3"}
+KUBE_VERSION=${KUBE_VERSION:-"1.0.3"}
 if [ ! -f kubernetes.tar.gz ] ; then
-  curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${K8S_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz
+  curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${KUBE_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz
   tar xzf kubernetes.tar.gz
 fi

 pushd kubernetes/server

View File

@@ -19,8 +19,6 @@ set -e
 SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"

-# use an array to record name and ip
-declare -A mm
 MASTER=""
 MASTER_IP=""
 MINION_IPS=""

@@ -443,24 +441,42 @@ function prepare-push() {
     echo "Upgrading nodes to local binaries is not yet supported.Please specify the version"
     exit 1
   fi
-  # Run build.sh to get the latest release
-  source "${KUBE_ROOT}/cluster/ubuntu/build.sh"
+
+  # Run build.sh to get the required release
+  pushd ubuntu
+  source "build.sh"
+  popd
 }

-# Update a kubernetes master with latest release
+# Update a kubernetes master with required release
 function push-master {
   source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"
   setClusterInfo
   ii=0
   for i in ${nodes}; do
-    if [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
-      echo "Cleaning on master ${i#*@}"
-      ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true
+    if [[ "${roles[${ii}]}" == "a" ]]; then
+      echo "Cleaning master ${i#*@}"
+      ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop;
+        sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd;
+        sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
+        sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
+        sudo rm -rf /etc/default/kube* /etc/default/flanneld;
+        sudo rm -rf ~/kube' || true
       provision-master
+    elif [[ "${roles[${ii}]}" == "ai" ]]; then
+      echo "Cleaning master ${i#*@}"
+      ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop;
+        sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd;
+        sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
+        sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
+        sudo rm -rf /etc/default/kube* /etc/default/flanneld;
+        sudo rm -rf ~/kube' || true
+      provision-masterandminion
     elif [[ "${roles[${ii}]}" == "i" ]]; then
+      ((ii=ii+1))
       continue
     else
-      echo "unsupported role for ${i}. please check"
+      echo "unsupported role for ${i}, please check"
       exit 1
     fi
     ((ii=ii+1))

@@ -468,41 +484,76 @@ function push-master {
   verify-cluster
 }

-# Update a kubernetes node with latest release
+# Update a kubernetes node with required release
 function push-node() {
   source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"

-  node=${1}
+  node_ip=${1}
   setClusterInfo
   ii=0
+  existing=false
   for i in ${nodes}; do
-    if [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "ai" && $i == *$node ]]; then
-      echo "Cleaning on node ${i#*@}"
-      ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true
+    if [[ "${roles[${ii}]}" == "i" && ${i#*@} == $node_ip ]]; then
+      echo "Cleaning node ${i#*@}"
+      ssh -t $i 'sudo -p "[sudo] stop the all process: " service flanneld stop;
+        sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
+        sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
+        sudo rm -rf /etc/default/kube* /etc/default/flanneld;
+        sudo rm -rf ~/kube' || true
       provision-minion $i
+      existing=true
+    elif [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]] && [[ ${i#*@} == $node_ip ]]; then
+      echo "${i} is master node, please try ./kube-push -m instead"
+      existing=true
+    elif [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then
+      ((ii=ii+1))
+      continue
     else
-      echo "unsupported role for ${i}, or nodes ${i} don't exist. please check"
+      echo "unsupported role for ${i}, please check"
       exit 1
     fi
     ((ii=ii+1))
   done
-  verify-cluster
+  if [[ "${existing}" == false ]]; then
+    echo "node ${node_ip} does not exist"
+  else
+    verify-cluster
+  fi
 }

-# Update a kubernetes cluster with latest source
+# Update a kubernetes cluster with required source
 function kube-push {
   prepare-push
-  #stop all the kube's process & etcd
   source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}"

+  #stop all the kube's process & etcd
+  ii=0
   for i in ${nodes}; do
-    echo "Cleaning on node ${i#*@}"
-    ssh -t $i 'sudo -p "[sudo] stop all process: " service etcd stop' || true
-    ssh -t $i 'rm -f /opt/bin/kube* /etc/init/kube* /etc/init.d/kube* /etc/default/kube*; rm -rf ~/kube' || true
+    {
+      echo "Cleaning on node ${i#*@}"
+      if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
+        ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop;
+          sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd' || true
+      elif [[ "${roles[${ii}]}" == "i" ]]; then
+        ssh -t $i 'pgrep flanneld && sudo -p "[sudo] password for stopping flanneld: " service flanneld stop' || true
+      else
+        echo "unsupported role for ${i}"
+      fi
+      ssh -t $i 'sudo rm -f /opt/bin/kube* /opt/bin/flanneld;
+        sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld;
+        sudo rm -rf /etc/default/kube* /etc/default/flanneld;
+        sudo rm -rf ~/kube' || true
+    }
+    ((ii=ii+1))
   done

-  #Update all nodes with the lasted release
+  #Update all nodes with the required release
   if [[ ! -f "ubuntu/binaries/master/kube-apiserver" ]]; then
-    echo "There is no latest release of kubernetes,please check first"
+    echo "There is no required release of kubernetes, please check first"
     exit 1
   fi

   #provision all nodes,include master&nodes
   setClusterInfo
   ii=0
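How the rewritten functions are typically reached; a usage sketch for the Ubuntu provider (the -m spelling comes from the message above; the node-update flag and the IP are assumptions for illustration):

cd cluster
KUBERNETES_PROVIDER=ubuntu KUBE_VERSION=1.0.3 ./kube-push.sh -m  # update the master ("a"/"ai" roles)
KUBERNETES_PROVIDER=ubuntu ./kube-push.sh -n 10.10.103.223       # assumed flag: update one node by IP;
                                                                 # unknown IPs print "node ... does not exist"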

View File

@@ -27,8 +27,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	_ "k8s.io/kubernetes/pkg/api/v1"
-	_ "k8s.io/kubernetes/pkg/expapi"
-	_ "k8s.io/kubernetes/pkg/expapi/v1"
+	_ "k8s.io/kubernetes/pkg/apis/experimental"
+	_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
 	pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"

@@ -44,6 +44,24 @@ var (
 	groupVersion = flag.StringP("version", "v", "api/v1", "groupPath/version for conversion.")
 )

+// We're moving to pkg/apis/group/version. This handles new and legacy packages.
+func pkgPath(group, version string) string {
+	if group == "" {
+		group = "api"
+	}
+	gv := group
+	if version != "" {
+		gv = path.Join(group, version)
+	}
+	switch {
+	case group == "api":
+		// TODO(lavalamp): remove this special case when we move api to apis/api
+		return path.Join(pkgBase, gv)
+	default:
+		return path.Join(pkgBase, "apis", gv)
+	}
+}
+
 func main() {
 	runtime.GOMAXPROCS(runtime.NumCPU())
 	flag.Parse()

@@ -70,14 +88,14 @@ func main() {
 		glog.Fatalf("error writing package line: %v", err)
 	}

-	versionPath := path.Join(pkgBase, group, version)
+	versionPath := pkgPath(group, version)
 	generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), versionPath)
 	apiShort := generator.AddImport(path.Join(pkgBase, "api"))
 	generator.AddImport(path.Join(pkgBase, "api/resource"))
 	// TODO(wojtek-t): Change the overwrites to a flag.
 	generator.OverwritePackage(version, "")
 	for _, knownType := range api.Scheme.KnownTypes(version) {
-		if !strings.HasPrefix(knownType.PkgPath(), versionPath) {
+		if knownType.PkgPath() != versionPath {
 			continue
 		}
 		if err := generator.GenerateConversionsForType(version, knownType); err != nil {

View File

@@ -27,8 +27,8 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	_ "k8s.io/kubernetes/pkg/api/v1"
-	_ "k8s.io/kubernetes/pkg/expapi"
-	_ "k8s.io/kubernetes/pkg/expapi/v1"
+	_ "k8s.io/kubernetes/pkg/apis/experimental"
+	_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
 	pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"

@@ -45,6 +45,32 @@ var (
 	overwrites = flag.StringP("overwrites", "o", "", "Comma-separated overwrites for package names")
 )

+// types inside the api package don't need to say "api.Scheme"; all others do.
+func destScheme(group, version string) string {
+	if group == "api" && version == "" {
+		return "Scheme"
+	}
+	return "api.Scheme"
+}
+
+// We're moving to pkg/apis/group/version. This handles new and legacy packages.
+func pkgPath(group, version string) string {
+	if group == "" {
+		group = "api"
+	}
+	gv := group
+	if version != "" {
+		gv = path.Join(group, version)
+	}
+	switch {
+	case group == "api":
+		// TODO(lavalamp): remove this special case when we move api to apis/api
+		return path.Join(pkgBase, gv)
+	default:
+		return path.Join(pkgBase, "apis", gv)
+	}
+}
+
 func main() {
 	runtime.GOMAXPROCS(runtime.NumCPU())
 	flag.Parse()

@@ -65,10 +91,7 @@ func main() {
 	group, version := path.Split(*groupVersion)
 	group = strings.TrimRight(group, "/")

-	registerTo := "api.Scheme"
-	if *groupVersion == "api/" {
-		registerTo = "Scheme"
-	}
+	registerTo := destScheme(group, version)
 	pkgname := group
 	if len(version) != 0 {
 		pkgname = version

@@ -79,7 +102,7 @@ func main() {
 		glog.Fatalf("error writing package line: %v", err)
 	}

-	versionPath := path.Join(pkgBase, group, version)
+	versionPath := pkgPath(group, version)
 	generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, sets.NewString("k8s.io/kubernetes"))
 	generator.AddImport(path.Join(pkgBase, "api"))

@@ -93,7 +116,7 @@ func main() {
 		}
 	}
 	for _, knownType := range api.Scheme.KnownTypes(version) {
-		if !strings.HasPrefix(knownType.PkgPath(), versionPath) {
+		if knownType.PkgPath() != versionPath {
 			continue
 		}
 		if err := generator.AddType(knownType); err != nil {

View File

@@ -39,13 +39,13 @@ import (
 	apierrors "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/latest"
 	"k8s.io/kubernetes/pkg/api/testapi"
+	explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
 	"k8s.io/kubernetes/pkg/apiserver"
+	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
 	"k8s.io/kubernetes/pkg/controller/endpoint"
 	"k8s.io/kubernetes/pkg/controller/node"
 	replicationControllerPkg "k8s.io/kubernetes/pkg/controller/replication"
-	explatest "k8s.io/kubernetes/pkg/expapi/latest"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/kubelet"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"

View File

@@ -35,11 +35,11 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/latest"
 	"k8s.io/kubernetes/pkg/api/meta"
+	explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
 	"k8s.io/kubernetes/pkg/apiserver"
 	"k8s.io/kubernetes/pkg/capabilities"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	explatest "k8s.io/kubernetes/pkg/expapi/latest"
 	"k8s.io/kubernetes/pkg/master"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/pkg/storage"

View File

@@ -35,12 +35,13 @@ import (
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 	"k8s.io/kubernetes/pkg/cloudprovider"
-	"k8s.io/kubernetes/pkg/controller/autoscaler"
-	"k8s.io/kubernetes/pkg/controller/autoscaler/metrics"
+	"k8s.io/kubernetes/pkg/controller/daemon"
 	"k8s.io/kubernetes/pkg/controller/endpoint"
 	"k8s.io/kubernetes/pkg/controller/namespace"
 	"k8s.io/kubernetes/pkg/controller/node"
 	"k8s.io/kubernetes/pkg/controller/persistentvolume"
+	"k8s.io/kubernetes/pkg/controller/podautoscaler"
+	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
 	replicationControllerPkg "k8s.io/kubernetes/pkg/controller/replication"
 	"k8s.io/kubernetes/pkg/controller/resourcequota"
 	"k8s.io/kubernetes/pkg/controller/route"

@@ -63,6 +64,7 @@ type CMServer struct {
 	CloudConfigFile         string
 	ConcurrentEndpointSyncs int
 	ConcurrentRCSyncs       int
+	ConcurrentDSCSyncs      int
 	ServiceSyncPeriod       time.Duration
 	NodeSyncPeriod          time.Duration
 	ResourceQuotaSyncPeriod time.Duration

@@ -98,6 +100,7 @@ func NewCMServer() *CMServer {
 		Address:                 net.ParseIP("127.0.0.1"),
 		ConcurrentEndpointSyncs: 5,
 		ConcurrentRCSyncs:       5,
+		ConcurrentDSCSyncs:      2,
 		ServiceSyncPeriod:       5 * time.Minute,
 		NodeSyncPeriod:          10 * time.Second,
 		ResourceQuotaSyncPeriod: 10 * time.Second,

@@ -213,6 +216,9 @@ func (s *CMServer) Run(_ []string) error {
 	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient, replicationControllerPkg.BurstReplicas)
 	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)

+	go daemon.NewDaemonSetsController(kubeClient).
+		Run(s.ConcurrentDSCSyncs, util.NeverStop)
+
 	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
 	if err != nil {
 		glog.Fatalf("Cloud provider could not be initialized: %v", err)

@@ -248,7 +254,7 @@ func (s *CMServer) Run(_ []string) error {
 	namespaceController.Run()

 	if s.EnableHorizontalPodAutoscaler {
-		horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient))
+		horizontalPodAutoscalerController := podautoscaler.NewHorizontalController(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient))
 		horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod)
 	}
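The renamed horizontal pod autoscaler controller is still gated by the controller-manager flag that the Salt template earlier wires through; a sketch of a manual invocation (other required flags elided):

kube-controller-manager --master=127.0.0.1:8080 --enable-horizontal-pod-autoscaler=true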

View File

@@ -27,10 +27,10 @@ import (
 	"time"

 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 	"k8s.io/kubernetes/pkg/proxy"
 	"k8s.io/kubernetes/pkg/proxy/config"

View File

@@ -35,11 +35,11 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/capabilities"
 	"k8s.io/kubernetes/pkg/client/chaosclient"
+	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
-	"k8s.io/kubernetes/pkg/client/unversioned/record"
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	"k8s.io/kubernetes/pkg/healthz"
 	"k8s.io/kubernetes/pkg/kubelet"

@@ -119,6 +119,7 @@ type KubeletServer struct {
 	ResolverConfig    string
 	ResourceContainer string
 	RktPath           string
+	RktStage1Image    string
 	RootDirectory     string
 	RunOnce           bool
 	StandaloneMode    bool

@@ -189,6 +190,7 @@ func NewKubeletServer() *KubeletServer {
 		RegistryBurst:     10,
 		ResourceContainer: "/kubelet",
 		RktPath:           "",
+		RktStage1Image:    "",
 		RootDirectory:     defaultRootDir,
 		SyncFrequency:     10 * time.Second,
 		SystemContainer:   "",

@@ -254,6 +256,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
 	fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
 	fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'")
+	fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used")
 	fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
 	fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
 	fs.IntVar(&s.MaxPods, "max-pods", 40, "Number of Pods that can run on this Kubelet.")

@@ -364,6 +367,7 @@ func (s *KubeletServer) KubeletConfig() (*KubeletConfig, error) {
 		ResolverConfig:    s.ResolverConfig,
 		ResourceContainer: s.ResourceContainer,
 		RktPath:           s.RktPath,
+		RktStage1Image:    s.RktStage1Image,
 		RootDirectory:     s.RootDirectory,
 		Runonce:           s.RunOnce,
 		StandaloneMode:    (len(s.APIServerList) == 0),

@@ -789,6 +793,7 @@ type KubeletConfig struct {
 	ResolverConfig    string
 	ResourceContainer string
 	RktPath           string
+	RktStage1Image    string
 	RootDirectory     string
 	Runonce           bool
 	StandaloneMode    bool

@@ -851,6 +856,7 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
 		kc.CgroupRoot,
 		kc.ContainerRuntime,
 		kc.RktPath,
+		kc.RktStage1Image,
 		kc.Mounter,
 		kc.DockerDaemonContainer,
 		kc.SystemContainer,
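The new flag rides alongside the existing rkt options; a sketch (paths illustrative, other kubelet flags elided):

kubelet --container-runtime=rkt \
  --rkt-path=/usr/local/bin/rkt \
  --rkt-stage1-image=/usr/local/rkt/stage1.aci  # if empty, stage1.aci next to --rkt-path is used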

View File

@@ -359,6 +359,7 @@ _kubectl_create()
     flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml")
     flags+=("--output=")
     two_word_flags+=("-o")
+    flags+=("--schema-cache-dir=")
    flags+=("--validate")

     must_have_one_flag=()

@@ -388,6 +389,7 @@ _kubectl_replace()
     flags+=("--grace-period=")
     flags+=("--output=")
     two_word_flags+=("-o")
+    flags+=("--schema-cache-dir=")
    flags+=("--timeout=")
     flags+=("--validate")

@@ -534,6 +536,7 @@ _kubectl_rolling-update()
     flags+=("--output-version=")
     flags+=("--poll-interval=")
     flags+=("--rollback")
+    flags+=("--schema-cache-dir=")
     flags+=("--show-all")
     flags+=("-a")
     flags+=("--sort-by=")

@@ -687,6 +690,7 @@ _kubectl_run()
     flags+=("--image=")
     flags+=("--labels=")
     two_word_flags+=("-l")
+    flags+=("--limits=")
     flags+=("--no-headers")
     flags+=("--output=")
     two_word_flags+=("-o")

@@ -695,6 +699,7 @@ _kubectl_run()
     flags+=("--port=")
     flags+=("--replicas=")
     two_word_flags+=("-r")
+    flags+=("--requests=")
     flags+=("--restart=")
     flags+=("--show-all")
     flags+=("-a")

@@ -762,6 +767,7 @@ _kubectl_expose()
     flags+=("--generator=")
     flags+=("--labels=")
     two_word_flags+=("-l")
+    flags+=("--load-balancer-ip=")
     flags+=("--name=")
     flags+=("--no-headers")
     flags+=("--output=")

@@ -781,7 +787,6 @@ _kubectl_expose()
     flags+=("--type=")

     must_have_one_flag=()
-    must_have_one_flag+=("--port=")
     must_have_one_noun=()
 }

View File

@@ -47,6 +47,7 @@ import (
 	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/spf13/pflag"
+	"k8s.io/kubernetes/pkg/controller/daemon"
 )

 // CMServer is the main context object for the controller manager.

@@ -113,6 +114,9 @@ func (s *CMServer) Run(_ []string) error {
 	controllerManager := replicationcontroller.NewReplicationManager(kubeClient, replicationcontroller.BurstReplicas)
 	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)

+	go daemon.NewDaemonSetsController(kubeClient).
+		Run(s.ConcurrentDSCSyncs, util.NeverStop)
+
 	//TODO(jdef) should eventually support more cloud providers here
 	if s.CloudProvider != mesos.ProviderName {
 		glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)

View File

@@ -37,8 +37,8 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/pkg/kubelet"
kconfig "k8s.io/kubernetes/pkg/kubelet/config" kconfig "k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"

View File

@@ -325,6 +325,7 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
kc.CgroupRoot, kc.CgroupRoot,
kc.ContainerRuntime, kc.ContainerRuntime,
kc.RktPath, kc.RktPath,
kc.RktStage1Image,
kc.Mounter, kc.Mounter,
kc.DockerDaemonContainer, kc.DockerDaemonContainer,
kc.SystemContainer, kc.SystemContainer,

View File

@@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/proc" "k8s.io/kubernetes/contrib/mesos/pkg/proc"
"k8s.io/kubernetes/contrib/mesos/pkg/queue" "k8s.io/kubernetes/contrib/mesos/pkg/queue"
"k8s.io/kubernetes/contrib/mesos/pkg/runtime" "k8s.io/kubernetes/contrib/mesos/pkg/runtime"
"k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
) )

View File

@@ -19,7 +19,7 @@ package queue
import ( import (
"time" "time"
"k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/client/cache"
) )
type EventType int type EventType int

View File

@@ -35,9 +35,9 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/client/unversioned/record"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
plugin "k8s.io/kubernetes/plugin/pkg/scheduler" plugin "k8s.io/kubernetes/plugin/pkg/scheduler"

View File

@@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
kutil "k8s.io/kubernetes/pkg/util" kutil "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"

View File

@@ -22,7 +22,7 @@ import (
"k8s.io/kubernetes/contrib/mesos/pkg/queue" "k8s.io/kubernetes/contrib/mesos/pkg/queue"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/client/cache"
) )
// wrapper for the k8s pod type so that we can define additional methods on a "pod" // wrapper for the k8s pod type so that we can define additional methods on a "pod"

View File

@@ -26,8 +26,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/endpoints" "k8s.io/kubernetes/pkg/api/endpoints"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
kservice "k8s.io/kubernetes/pkg/controller/endpoint" kservice "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"

View File

@@ -222,7 +222,7 @@ you are doing [manual node administration](#manual-node-administration), then yo
capacity when adding a node. capacity when adding a node.
The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It
checks that the sum of the limits of containers on the node is no greater than than the node capacity. It checks that the sum of the limits of containers on the node is no greater than the node capacity. It
includes all containers started by kubelet, but not containers started directly by docker, nor includes all containers started by kubelet, but not containers started directly by docker, nor
processes not in containers. processes not in containers.
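To see the capacity the scheduler is working against, you can inspect a node directly (a minimal sketch; the node name is hypothetical). The reported `Capacity` is the budget that the sum of container limits must fit within:

```console
$ kubectl describe node node-1
```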

Binary image file changed but not shown (before: 70 KiB, after: 94 KiB).

View File

@@ -60,7 +60,7 @@ Instead of a single Timestamp, each event object [contains](http://releases.k8s.
Each binary that generates events: Each binary that generates events:
* Maintains a historical record of previously generated events: * Maintains a historical record of previously generated events:
* Implemented with ["Least Recently Used Cache"](https://github.com/golang/groupcache/blob/master/lru/lru.go) in [`pkg/client/unversioned/record/events_cache.go`](../../pkg/client/unversioned/record/events_cache.go). * Implemented with ["Least Recently Used Cache"](https://github.com/golang/groupcache/blob/master/lru/lru.go) in [`pkg/client/record/events_cache.go`](../../pkg/client/record/events_cache.go).
* The key in the cache is generated from the event object minus timestamps/count/transient fields, specifically the following events fields are used to construct a unique key for an event: * The key in the cache is generated from the event object minus timestamps/count/transient fields, specifically the following events fields are used to construct a unique key for an event:
* `event.Source.Component` * `event.Source.Component`
* `event.Source.Host` * `event.Source.Host`

View File

@@ -38,7 +38,7 @@ with a number of existing API types and with the [API
conventions](api-conventions.md). If creating a new API conventions](api-conventions.md). If creating a new API
type/resource, we also recommend that you first send a PR containing type/resource, we also recommend that you first send a PR containing
just a proposal for the new API types, and that you initially target just a proposal for the new API types, and that you initially target
the experimental API (pkg/expapi). the experimental API (pkg/apis/experimental).
The Kubernetes API has two major components - the internal structures and The Kubernetes API has two major components - the internal structures and
the versioned APIs. The versioned APIs are intended to be stable, while the the versioned APIs. The versioned APIs are intended to be stable, while the
@@ -399,10 +399,10 @@ The conversion code resides with each versioned API. There are two files:
functions functions
- `pkg/api/<version>/conversion_generated.go` containing auto-generated - `pkg/api/<version>/conversion_generated.go` containing auto-generated
conversion functions conversion functions
- `pkg/expapi/<version>/conversion.go` containing manually written conversion - `pkg/apis/experimental/<version>/conversion.go` containing manually written
functions
- `pkg/expapi/<version>/conversion_generated.go` containing auto-generated
conversion functions conversion functions
- `pkg/apis/experimental/<version>/conversion_generated.go` containing
auto-generated conversion functions
Since auto-generated conversion functions are using manually written ones, Since auto-generated conversion functions are using manually written ones,
those manually written should be named with a defined convention, i.e. a function those manually written should be named with a defined convention, i.e. a function
@@ -437,7 +437,7 @@ of your versioned api objects.
The deep copy code resides with each versioned API: The deep copy code resides with each versioned API:
- `pkg/api/<version>/deep_copy_generated.go` containing auto-generated copy functions - `pkg/api/<version>/deep_copy_generated.go` containing auto-generated copy functions
- `pkg/expapi/<version>/deep_copy_generated.go` containing auto-generated copy functions - `pkg/apis/experimental/<version>/deep_copy_generated.go` containing auto-generated copy functions
To regenerate them: To regenerate them:
- run - run
@@ -446,6 +446,23 @@ To regenerate them:
hack/update-generated-deep-copies.sh hack/update-generated-deep-copies.sh
``` ```
## Making a new API Group
This section is under construction, as we make the tooling completely generic.
At the moment, you'll have to make a new directory under pkg/apis/; copy the
directory structure from pkg/apis/experimental. Add the new group/version to all
of the hack/{verify,update}-generated-{deep-copy,conversions,swagger}.sh files
in the appropriate places; it should just require adding your new group/version
to a bash array. You will also need to make sure your new types are imported by
the generation commands (cmd/gendeepcopy/ & cmd/genconversion). These
instructions may not be complete and will be updated as we gain experience.
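As a rough sketch of those steps (the group name `mygroup` is hypothetical; the `VERSIONS` override comes from the update scripts themselves, which default it when unset):

```
# Start a new group by copying the experimental layout (hypothetical group name).
cp -r pkg/apis/experimental pkg/apis/mygroup
# Regenerate deep copies and conversions for the new group/version.
VERSIONS="mygroup/ mygroup/v1" hack/update-generated-deep-copies.sh
VERSIONS="mygroup/v1" hack/update-generated-conversions.sh
```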
Adding API groups outside of the pkg/apis/ directory is not currently supported,
but is clearly desirable. The deep copy & conversion generators need to work by
parsing go files instead of by reflection; then they will be easy to point at
arbitrary directories: see issue [#13775](http://issue.k8s.io/13775).
## Update the fuzzer ## Update the fuzzer
Part of our testing regimen for APIs is to "fuzz" (fill with random values) API Part of our testing regimen for APIs is to "fuzz" (fill with random values) API

View File

@@ -108,7 +108,7 @@ Once the playbook has finished, it will print out the IP of the Kubernetes master
SSH to it using the key that was created and using the _core_ user, and you can list the machines in your cluster: SSH to it using the key that was created and using the _core_ user, and you can list the machines in your cluster:
$ ssh -i ~/.ssh/id_rsa_k8s core@<maste IP> $ ssh -i ~/.ssh/id_rsa_k8s core@<master IP>
$ fleetctl list-machines $ fleetctl list-machines
MACHINE IP METADATA MACHINE IP METADATA
a017c422... <node #1 IP> role=node a017c422... <node #1 IP> role=node

View File

@@ -42,7 +42,7 @@ Running Kubernetes locally via Docker
- [Step Three: Run the service proxy](#step-three-run-the-service-proxy) - [Step Three: Run the service proxy](#step-three-run-the-service-proxy)
- [Test it out](#test-it-out) - [Test it out](#test-it-out)
- [Run an application](#run-an-application) - [Run an application](#run-an-application)
- [Expose it as a service:](#expose-it-as-a-service) - [Expose it as a service](#expose-it-as-a-service)
- [A note on turning down your cluster](#a-note-on-turning-down-your-cluster) - [A note on turning down your cluster](#a-note-on-turning-down-your-cluster)
### Overview ### Overview
@@ -128,7 +128,7 @@ On OS/X you will need to set up port forwarding via ssh:
boot2docker ssh -L8080:localhost:8080 boot2docker ssh -L8080:localhost:8080
``` ```
List the nodes in your cluster by running:: List the nodes in your cluster by running:
```sh ```sh
kubectl get nodes kubectl get nodes
@@ -149,7 +149,7 @@ If you are running different Kubernetes clusters, you may need to specify `-s ht
kubectl -s http://localhost:8080 run nginx --image=nginx --port=80 kubectl -s http://localhost:8080 run nginx --image=nginx --port=80
``` ```
now run `docker ps` you should see nginx running. You may need to wait a few minutes for the image to get pulled. Now run `docker ps`, and you should see nginx running. You may need to wait a few minutes for the image to get pulled.
### Expose it as a service ### Expose it as a service
@@ -164,7 +164,7 @@ NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR
nginx 10.0.93.211 <none> 80/TCP run=nginx 1h nginx 10.0.93.211 <none> 80/TCP run=nginx 1h
``` ```
If `CLUSTER_IP` is blank run the following command to obtain it. Know issue #10836 If `CLUSTER_IP` is blank, run the following command to obtain it. Known issue [#10836](https://github.com/kubernetes/kubernetes/issues/10836)
```sh ```sh
kubectl get svc nginx kubectl get svc nginx

View File

@@ -123,7 +123,7 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_API_ARGS="" KUBE_API_ARGS=""
``` ```
* Edit /etc/etcd/etcd.conf,let the etcd to listen all the ip instead of 127.0.0.1, if not, you will get the error like "connection refused" * Edit /etc/etcd/etcd.conf so that etcd listens on all IPs instead of only 127.0.0.1; if not, you will get errors like "connection refused". Note that Fedora 22 uses etcd 2.0; one of the changes in etcd 2.0 is that it now uses ports 2379 and 2380 (as opposed to etcd 0.4.6, which used 4001 and 7001).
```sh ```sh
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001" ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001"
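# With etcd 2.0 on Fedora 22 (ports 2379/2380 as noted above), the equivalent
# setting would be (an assumption, not from the original guide):
# ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"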

View File

@@ -132,6 +132,24 @@ However the gcloud bundled kubectl version may be older than the one downloaded
get.k8s.io install script. We recommend you use the downloaded binary to avoid get.k8s.io install script. We recommend you use the downloaded binary to avoid
potential issues with client/server version skew. potential issues with client/server version skew.
#### Enabling bash completion of the Kubernetes command line tools
You may find it useful to enable `kubectl` bash completion:
```
$ source ./contrib/completions/bash/kubectl
```
**Note**: This will last for the duration of your bash session. If you want to make this permanent, you need to add this line to your bash profile.
Alternatively, on most Linux distributions you can also copy the completions file into your `bash_completion.d` directory like this:
```
$ cp ./contrib/completions/bash/kubectl /etc/bash_completion.d/
```
but then you have to update it when you update kubectl.
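For example (a minimal sketch, assuming you run it from the unpacked release directory), you could append the `source` line to your `~/.bashrc`:

```
$ echo "source $PWD/contrib/completions/bash/kubectl" >> ~/.bashrc
```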
### Getting started with your cluster ### Getting started with your cluster
#### Inspect your cluster #### Inspect your cluster

View File

@@ -38,36 +38,31 @@ We still have [a bunch of work](http://issue.k8s.io/8262) to do to make the expe
### **Prerequisite** ### **Prerequisite**
- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on your machine and should be enabled. The minimum version required at this moment (2015/05/28) is [215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html). - [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on the machine and should be enabled. The minimum version required at this moment (2015/09/01) is 219.
*(Note that systemd is not required by rkt itself, we are using it here to monitor and manage the pods launched by kubelet.)* *(Note that systemd is not required by rkt itself, we are using it here to monitor and manage the pods launched by kubelet.)*
- Install the latest rkt release according to the instructions [here](https://github.com/coreos/rkt). - Install the latest rkt release according to the instructions [here](https://github.com/coreos/rkt).
The minimum version required for now is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6). The minimum version required for now is [v0.8.0](https://github.com/coreos/rkt/releases/tag/v0.8.0).
- Make sure the `rkt metadata service` is running because it is necessary for running pod in private network mode.
More details about the networking of rkt can be found in the [documentation](https://github.com/coreos/rkt/blob/master/Documentation/networking.md).
To start the `rkt metadata service`, you can simply run:
```console
$ sudo rkt metadata-service
```
If you want the service to be running as a systemd service, then:
```console
$ sudo systemd-run rkt metadata-service
```
Alternatively, you can use the [rkt-metadata.service](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.service) and [rkt-metadata.socket](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.socket) to start the service.
- Note that for rkt version later than v0.7.0, `metadata service` is not required for running pods in private networks. So now rkt pods will not register the metadata service be default.
### Local cluster ### Local cluster
To use rkt as the container runtime, you just need to set the environment variable `CONTAINER_RUNTIME`: To use rkt as the container runtime, we need to supply `--container-runtime=rkt` and `--rkt-path=$PATH_TO_RKT_BINARY` to kubelet. We can additionally provide the `--rkt-stage1-image` flag
to select which [stage1 image](https://github.com/coreos/rkt/blob/master/Documentation/running-lkvm-stage1.md) we want to use.
If you are using the [hack/local-up-cluster.sh](../../../hack/local-up-cluster.sh) script to launch the local cluster, then you can edit the environment variables `CONTAINER_RUNTIME`, `RKT_PATH` and `RKT_STAGE1_IMAGE` to
set these flags:
```console ```console
$ export CONTAINER_RUNTIME=rkt $ export CONTAINER_RUNTIME=rkt
$ export RKT_PATH=$PATH_TO_RKT_BINARY
$ export RKT_STAGE1_IMAGE=$PATH_TO_STAGE1_IMAGE
```
Then we can launch the local cluster using the script:
```console
$ hack/local-up-cluster.sh $ hack/local-up-cluster.sh
``` ```
@@ -85,7 +80,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`: You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:
```console ```console
$ export KUBE_RKT_VERSION=0.5.6 $ export KUBE_RKT_VERSION=0.8.0
``` ```
Then you can launch the cluster by: Then you can launch the cluster by:
@@ -109,7 +104,7 @@ $ export KUBE_CONTAINER_RUNTIME=rkt
You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`: You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`:
```console ```console
$ export KUBE_RKT_VERSION=0.5.6 $ export KUBE_RKT_VERSION=0.8.0
``` ```
You can optionally choose the CoreOS channel by setting `COREOS_CHANNEL`: You can optionally choose the CoreOS channel by setting `COREOS_CHANNEL`:
@@ -134,6 +129,46 @@ See [a simple nginx example](../../../docs/user-guide/simple-nginx.md) to try ou
For more complete applications, please look in the [examples directory](../../../examples/). For more complete applications, please look in the [examples directory](../../../examples/).
### Debugging
Here are several tips for when you run into any issues.
##### Check logs
By default, the log verbosity level is 2. In order to see more logs related to rkt, we can set the verbosity level to 4.
For a local cluster, we can set the environment variable `LOG_LEVEL=4`.
If the cluster is using salt, we can edit the [logging.sls](../../../cluster/saltbase/pillar/logging.sls) in the saltbase.
##### Check rkt pod status
To check the pods' status, we can use rkt commands such as `rkt list`, `rkt status`, `rkt image list`, etc.
More information about the rkt command line can be found [here](https://github.com/coreos/rkt/blob/master/Documentation/commands.md).
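For example (the pod UUID shown is hypothetical), a quick inspection session might look like:

```console
$ sudo rkt list
$ sudo rkt status 5c1a8bc0
$ sudo rkt image list
```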
##### Check journal logs
As we use systemd to launch rkt pods (by creating service files which run `rkt run-prepared`), we can check the pods' logs
using `journalctl`:
- Check the running state of the systemd service:
```console
$ sudo journalctl -u $SERVICE_FILE
```
where `$SERVICE_FILE` is the name of the service file created for the pod; you can find it in the kubelet logs.
##### Check the log of the container in the pod
```console
$ sudo journalctl -M rkt-$UUID -u $CONTAINER_NAME
```
where `$UUID` is the rkt pod's UUID, which you can find via `rkt list --full`, and `$CONTAINER_NAME` is the container's name.
##### Check Kubernetes events and logs
Besides the above tricks, Kubernetes also provides handy tools for debugging pods. More information can be found [here](../../../docs/user-guide/application-troubleshooting.md).
<!-- BEGIN MUNGE: GENERATED_ANALYTICS --> <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/rkt/README.md?pixel)]() [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/rkt/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS --> <!-- END MUNGE: GENERATED_ANALYTICS -->

View File

@@ -246,7 +246,8 @@ kubernetes/cluster/ubuntu/build.sh
sudo cp -f binaries/minion/* /usr/bin sudo cp -f binaries/minion/* /usr/bin
# Get the iptables based kube-proxy recommended for this demo # Get the iptables based kube-proxy recommended for this demo
sudo wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy -P /usr/bin/ wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy
sudo cp kube-proxy /usr/bin/
sudo chmod +x /usr/bin/kube-proxy sudo chmod +x /usr/bin/kube-proxy
``` ```

View File

@@ -28,6 +28,10 @@ JSON and YAML formats are accepted.
\fB\-o\fP, \fB\-\-output\fP="" \fB\-o\fP, \fB\-\-output\fP=""
Output mode. Use "\-o name" for shorter output (resource/name). Output mode. Use "\-o name" for shorter output (resource/name).
.PP
\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
.PP .PP
\fB\-\-validate\fP=true \fB\-\-validate\fP=true
If true, use a schema to validate the input before sending it If true, use a schema to validate the input before sending it

View File

@@ -50,6 +50,10 @@ re\-use the labels from the resource it exposes.
\fB\-l\fP, \fB\-\-labels\fP="" \fB\-l\fP, \fB\-\-labels\fP=""
Labels to apply to the service created by this call. Labels to apply to the service created by this call.
.PP
\fB\-\-load\-balancer\-ip\fP=""
IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud\-provider specific).
.PP .PP
\fB\-\-name\fP="" \fB\-\-name\fP=""
The name for the newly created object. The name for the newly created object.

View File

@@ -46,6 +46,10 @@ Please refer to the models in
\fB\-o\fP, \fB\-\-output\fP="" \fB\-o\fP, \fB\-\-output\fP=""
Output mode. Use "\-o name" for shorter output (resource/name). Output mode. Use "\-o name" for shorter output (resource/name).
.PP
\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
.PP .PP
\fB\-\-timeout\fP=0 \fB\-\-timeout\fP=0
Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object

View File

@@ -60,6 +60,10 @@ existing replication controller and overwrite at least one (common) label in its
\fB\-\-rollback\fP=false \fB\-\-rollback\fP=false
If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout
.PP
\fB\-\-schema\-cache\-dir\fP="/tmp/kubectl.schema"
If non\-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
.PP .PP
\fB\-a\fP, \fB\-\-show\-all\fP=false \fB\-a\fP, \fB\-\-show\-all\fP=false
When printing, show all resources (default hide terminated pods.) When printing, show all resources (default hide terminated pods.)

View File

@@ -50,6 +50,10 @@ Creates a replication controller to manage the created container(s).
\fB\-l\fP, \fB\-\-labels\fP="" \fB\-l\fP, \fB\-\-labels\fP=""
Labels to apply to the pod(s). Labels to apply to the pod(s).
.PP
\fB\-\-limits\fP=""
The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'
.PP .PP
\fB\-\-no\-headers\fP=false \fB\-\-no\-headers\fP=false
When using the default output, don't print headers. When using the default output, don't print headers.
@@ -76,6 +80,10 @@ Creates a replication controller to manage the created container(s).
\fB\-r\fP, \fB\-\-replicas\fP=1 \fB\-r\fP, \fB\-\-replicas\fP=1
Number of replicas to create for this container. Default is 1. Number of replicas to create for this container. Default is 1.
.PP
\fB\-\-requests\fP=""
The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'
.PP .PP
\fB\-\-restart\fP="Always" \fB\-\-restart\fP="Always"
The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and \-\-replicas must be 1. Default 'Always' The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and \-\-replicas must be 1. Default 'Always'

View File

@@ -166,7 +166,7 @@ the same time, we can introduce an additional etcd event type:
Thus, we need to create the EtcdResync event, extend watch.Interface and Thus, we need to create the EtcdResync event, extend watch.Interface and
its implementations to support it and handle those events appropriately its implementations to support it and handle those events appropriately
in places like in places like
[Reflector](../../pkg/client/unversioned/cache/reflector.go) [Reflector](../../pkg/client/cache/reflector.go)
However, this might turn out to be unnecessary optimization if apiserver However, this might turn out to be unnecessary optimization if apiserver
will always keep up (which is possible in the new design). We will work will always keep up (which is possible in the new design). We will work

View File

@@ -88,7 +88,7 @@ use the full image name (e.g. gcr.io/my_project/image:tag).
All pods in a cluster will have read access to images in this registry. All pods in a cluster will have read access to images in this registry.
The kubelet kubelet will authenticate to GCR using the instance's The kubelet will authenticate to GCR using the instance's
Google service account. The service account on the instance Google service account. The service account on the instance
will have a `https://www.googleapis.com/auth/devstorage.read_only`, will have a `https://www.googleapis.com/auth/devstorage.read_only`,
so it can pull from the project's GCR, but not push. so it can pull from the project's GCR, but not push.

View File

@@ -61,6 +61,7 @@ $ cat pod.json | kubectl create -f -
``` ```
-f, --filename=[]: Filename, directory, or URL to file to use to create the resource -f, --filename=[]: Filename, directory, or URL to file to use to create the resource
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name). -o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
--validate[=true]: If true, use a schema to validate the input before sending it --validate[=true]: If true, use a schema to validate the input before sending it
``` ```
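A hedged illustration of the new flag (the directory value here is just the documented default made explicit), reusing a warm schema cache across validating creates:

```
$ kubectl create -f ./pod.json --validate=true --schema-cache-dir=/tmp/kubectl.schema
```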
@@ -96,7 +97,7 @@ $ cat pod.json | kubectl create -f -
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.152429973 +0000 UTC ###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.289761103 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS --> <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_create.md?pixel)]() [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_create.md?pixel)]()

View File

@@ -45,7 +45,7 @@ selector for a new Service on the specified port. If no labels are specified, th
re-use the labels from the resource it exposes. re-use the labels from the resource it exposes.
``` ```
kubectl expose (-f FILENAME | TYPE NAME) --port=port [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type] kubectl expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]
``` ```
### Examples ### Examples
@@ -73,6 +73,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to expose a service -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to expose a service
--generator="service/v2": The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'. --generator="service/v2": The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.
-l, --labels="": Labels to apply to the service created by this call. -l, --labels="": Labels to apply to the service created by this call.
--load-balancer-ip="": IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).
--name="": The name for the newly created object. --name="": The name for the newly created object.
--no-headers[=false]: When using the default output, don't print headers. --no-headers[=false]: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].
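For example (resource name and address hypothetical; the request is honored only if the cloud provider supports specifying a load balancer IP):

```
$ kubectl expose rc nginx --port=80 --type=LoadBalancer --load-balancer-ip=78.11.24.19
```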
@@ -121,7 +122,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.159044239 +0000 UTC ###### Auto generated by spf13/cobra at 2015-09-11 03:36:48.458259032 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS --> <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]() [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]()

View File

@@ -74,6 +74,7 @@ kubectl replace --force -f ./pod.json
--force[=false]: Delete and re-create the specified resource --force[=false]: Delete and re-create the specified resource
--grace-period=-1: Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative. --grace-period=-1: Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative.
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name). -o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
--timeout=0: Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object --timeout=0: Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object
--validate[=true]: If true, use a schema to validate the input before sending it --validate[=true]: If true, use a schema to validate the input before sending it
``` ```
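A hedged example combining the new flag with a force replace (the cache directory is hypothetical):

```
$ kubectl replace --force -f ./pod.json --schema-cache-dir=/var/tmp/kubectl.schema
```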
@@ -110,7 +111,7 @@ kubectl replace --force -f ./pod.json
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153166598 +0000 UTC ###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.290279625 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS --> <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_replace.md?pixel)]() [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_replace.md?pixel)]()

View File

@@ -78,6 +78,7 @@ $ kubectl rolling-update frontend --image=image:v2
--output-version="": Output the formatted object with the given version (default api-version). --output-version="": Output the formatted object with the given version (default api-version).
--poll-interval=3s: Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". --poll-interval=3s: Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
--rollback[=false]: If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout --rollback[=false]: If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
@@ -118,7 +119,7 @@ $ kubectl rolling-update frontend --image=image:v2
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154895732 +0000 UTC ###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.293748592 +0000 UTC
<!-- BEGIN MUNGE: GENERATED_ANALYTICS --> <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]() [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]()

View File

@@ -87,12 +87,14 @@ $ kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
--hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container. --hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container.
--image="": The image for the container to run. --image="": The image for the container to run.
-l, --labels="": Labels to apply to the pod(s). -l, --labels="": Labels to apply to the pod(s).
--limits="": The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'
--no-headers[=false]: When using the default output, don't print headers. --no-headers[=false]: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].
--output-version="": Output the formatted object with the given version (default api-version). --output-version="": Output the formatted object with the given version (default api-version).
--overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field. --overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.
--port=-1: The port that this container exposes. --port=-1: The port that this container exposes.
-r, --replicas=1: Number of replicas to create for this container. Default is 1. -r, --replicas=1: Number of replicas to create for this container. Default is 1.
--requests="": The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'
--restart="Always": The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and --replicas must be 1. Default 'Always' --restart="Always": The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and --replicas must be 1. Default 'Always'
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
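Putting the two new flags together (values copied from the flag descriptions above):

```
$ kubectl run nginx --image=nginx --limits='cpu=200m,memory=512Mi' --requests='cpu=100m,memory=256Mi'
```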

View File

@@ -79,7 +79,7 @@ Note that replication controllers may themselves have labels and would generally
Pods may be removed from a replication controller's target set by changing their labels. This technique may be used to remove pods from service for debugging, data recovery, etc. Pods that are removed in this way will be replaced automatically (assuming that the number of replicas is not also changed). Pods may be removed from a replication controller's target set by changing their labels. This technique may be used to remove pods from service for debugging, data recovery, etc. Pods that are removed in this way will be replaced automatically (assuming that the number of replicas is not also changed).
Similarly, deleting a replication controller does not affect the pods it created. Its `replicas` field must first be set to 0 in order to delete the pods controlled. (Note that the client tool, kubectl, provides a single operation, [stop](kubectl/kubectl_stop.md) to delete both the replication controller and the pods it controls. However, there is no such operation in the API at the moment) Similarly, deleting a replication controller using the API does not affect the pods it created. Its `replicas` field must first be set to `0` in order to delete the pods controlled. (Note that the client tool, `kubectl`, provides a single operation, [delete](kubectl/kubectl_delete.md), to delete both the replication controller and the pods it controls. If you want to leave the pods running when deleting a replication controller, specify `--cascade=false`. However, there is no such operation in the API at the moment.)
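As described above (the controller name is hypothetical), deleting only the controller while leaving its pods running would look like:

```console
$ kubectl delete rc frontend --cascade=false
```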
## Responsibilities of the replication controller ## Responsibilities of the replication controller

View File

@@ -144,7 +144,7 @@ secrets/build-robot-secret
Now you can confirm that the newly built secret is populated with an API token for the "build-robot" service account. Now you can confirm that the newly built secret is populated with an API token for the "build-robot" service account.
```console ```console
kubectl describe secrets/build-robot-secret $ kubectl describe secrets/build-robot-secret
Name: build-robot-secret Name: build-robot-secret
Namespace: default Namespace: default
Labels: <none> Labels: <none>

View File

@@ -433,6 +433,7 @@ information about the provisioned balancer will be published in the `Service`'s
} }
], ],
"clusterIP": "10.0.171.239", "clusterIP": "10.0.171.239",
"loadBalancerIP": "78.11.24.19",
"type": "LoadBalancer" "type": "LoadBalancer"
}, },
"status": { "status": {
@@ -448,7 +449,11 @@ information about the provisioned balancer will be published in the `Service`'s
``` ```
Traffic from the external load balancer will be directed at the backend `Pods`, Traffic from the external load balancer will be directed at the backend `Pods`,
though exactly how that works depends on the cloud provider. though exactly how that works depends on the cloud provider. Some cloud providers allow
the `loadBalancerIP` to be specified. In those cases, the load-balancer will be created
with the user-specified `loadBalancerIP`. If the `loadBalancerIP` field is not specified,
an ephemeral IP will be assigned to the load balancer. If the `loadBalancerIP` is specified, but the
cloud provider does not support the feature, the field will be ignored.
## Shortcomings ## Shortcomings

View File

@@ -194,6 +194,7 @@ func TestExampleObjectSchemas(t *testing.T) {
"../examples/glusterfs": { "../examples/glusterfs": {
"glusterfs-pod": &api.Pod{}, "glusterfs-pod": &api.Pod{},
"glusterfs-endpoints": &api.Endpoints{}, "glusterfs-endpoints": &api.Endpoints{},
"glusterfs-service": &api.Service{},
}, },
"../docs/user-guide/liveness": { "../docs/user-guide/liveness": {
"exec-liveness": &api.Pod{}, "exec-liveness": &api.Pod{},

View File

@@ -75,6 +75,15 @@ NAME ENDPOINTS
glusterfs-cluster 10.240.106.152:1,10.240.79.157:1 glusterfs-cluster 10.240.106.152:1,10.240.79.157:1
``` ```
We also need to create a service for these endpoints, so that the endpoints will persist. We will add this service without a selector to tell Kubernetes we want to add its endpoints manually. You can see [glusterfs-service.json](glusterfs-service.json) for details.
Use this command to create the service:
```sh
$ kubectl create -f examples/glusterfs/glusterfs-service.json
```
### Create a POD ### Create a POD
The following *volume* spec in [glusterfs-pod.json](glusterfs-pod.json) illustrates a sample configuration. The following *volume* spec in [glusterfs-pod.json](glusterfs-pod.json) illustrates a sample configuration.

View File

@@ -0,0 +1,12 @@
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "glusterfs-cluster"
},
"spec": {
"ports": [
{"port": 1}
]
}
}

View File

@@ -46,7 +46,7 @@ This example shows how to build a simple, multi-tier web application using Kuber
- [Step Three: Fire up the replicated slave pods](#step-three-fire-up-the-replicated-slave-pods) - [Step Three: Fire up the replicated slave pods](#step-three-fire-up-the-replicated-slave-pods)
- [Step Four: Create the redis slave service](#step-four-create-the-redis-slave-service) - [Step Four: Create the redis slave service](#step-four-create-the-redis-slave-service)
- [Step Five: Create the frontend replicated pods](#step-five-create-the-frontend-replicated-pods) - [Step Five: Create the frontend replicated pods](#step-five-create-the-frontend-replicated-pods)
- [Step Six: Set up the guestbook frontend service.](#step-six-set-up-the-guestbook-frontend-service) - [Step Six: Set up the guestbook frontend service](#step-six-set-up-the-guestbook-frontend-service)
- [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific) - [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific)
- [Create the Frontend Service](#create-the-frontend-service) - [Create the Frontend Service](#create-the-frontend-service)
- [Accessing the guestbook site externally](#accessing-the-guestbook-site-externally) - [Accessing the guestbook site externally](#accessing-the-guestbook-site-externally)

View File

@@ -26,26 +26,27 @@ kube::golang::setup_env
genconversion=$(kube::util::find-binary "genconversion") genconversion=$(kube::util::find-binary "genconversion")
function generate_version() { function generate_version() {
local version=$1 local group_version=$1
local TMPFILE="/tmp/conversion_generated.$(date +%s).go" local TMPFILE="/tmp/conversion_generated.$(date +%s).go"
echo "Generating for ${version}" echo "Generating for ${group_version}"
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > "$TMPFILE" sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > "$TMPFILE"
cat >> "$TMPFILE" <<EOF cat >> "$TMPFILE" <<EOF
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-conversions.sh // DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-conversions.sh
EOF EOF
"${genconversion}" -v "${version}" -f - >> "$TMPFILE" "${genconversion}" -v "${group_version}" -f - >> "$TMPFILE"
mv "$TMPFILE" "pkg/${version}/conversion_generated.go" mv "$TMPFILE" "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/conversion_generated.go"
} }
DEFAULT_VERSIONS="api/v1 expapi/v1" # TODO(lavalamp): get this list by listing the pkg/apis/ directory?
VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS} DEFAULT_GROUP_VERSIONS="api/v1 experimental/v1"
VERSIONS=${VERSIONS:-$DEFAULT_GROUP_VERSIONS}
for ver in $VERSIONS; do for ver in $VERSIONS; do
# Ensure that the version being processed is registered by setting # Ensure that the version being processed is registered by setting
# KUBE_API_VERSIONS. # KUBE_API_VERSIONS.
KUBE_API_VERSIONS="${ver##*/}" generate_version "${ver}" KUBE_API_VERSIONS="${ver##*/}" generate_version "${ver}"
done done

View File

@@ -25,42 +25,35 @@ kube::golang::setup_env
gendeepcopy=$(kube::util::find-binary "gendeepcopy") gendeepcopy=$(kube::util::find-binary "gendeepcopy")
function result_file_name() {
local version=$1
echo "pkg/${version}/deep_copy_generated.go"
}
function generate_version() { function generate_version() {
local version=$1 local group_version=$1
local TMPFILE="/tmp/deep_copy_generated.$(date +%s).go" local TMPFILE="/tmp/deep_copy_generated.$(date +%s).go"
echo "Generating for ${version}" echo "Generating for ${group_version}"
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE
cat >> $TMPFILE <<EOF cat >> $TMPFILE <<EOF
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-deep-copies.sh. // DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-deep-copies.sh.
EOF EOF
"${gendeepcopy}" -v "${version}" -f - -o "${version}=" >> "$TMPFILE" "${gendeepcopy}" -v "${group_version}" -f - -o "${group_version}=" >> "$TMPFILE"
mv "$TMPFILE" `result_file_name ${version}` local dest="pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/deep_copy_generated.go"
rm -f "${dest}"
mv "${TMPFILE}" "${dest}"
} }
function generate_deep_copies() { function generate_deep_copies() {
local versions="$@" local group_versions="$@"
# To avoid compile errors, remove the currently existing files. for ver in ${group_versions}; do
for ver in ${versions}; do # Ensure that the version being processed is registered by setting
rm -f `result_file_name ${ver}` # KUBE_API_VERSIONS.
done apiVersions="${ver##*/}"
for ver in ${versions}; do KUBE_API_VERSIONS="${apiVersions}" generate_version "${ver}"
# Ensure that the version being processed is registered by setting done
# KUBE_API_VERSIONS.
apiVersions="${ver##*/}"
KUBE_API_VERSIONS="${apiVersions}" generate_version "${ver}"
done
} }
DEFAULT_VERSIONS="api/ api/v1 expapi/ expapi/v1" DEFAULT_VERSIONS="api/ api/v1 experimental/ experimental/v1"
VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS} VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS}
generate_deep_copies "$VERSIONS" generate_deep_copies "$VERSIONS"

View File

@@ -40,7 +40,7 @@ find_files() {
\) -prune \ \) -prune \
\) \ \) \
\( -wholename '*pkg/api/v*/types.go' \ \( -wholename '*pkg/api/v*/types.go' \
-o -wholename '*pkg/expapi/v*/types.go' \ -o -wholename '*pkg/apis/*/v*/types.go' \
\) \)
} }
@@ -61,7 +61,7 @@ for file in $versioned_api_files; do
fi fi
done done
internal_types_files="${KUBE_ROOT}/pkg/api/types.go ${KUBE_ROOT}/pkg/expapi/types.go" internal_types_files="${KUBE_ROOT}/pkg/api/types.go ${KUBE_ROOT}/pkg/apis/experimental/types.go"
for internal_types_file in $internal_types_files; do for internal_types_file in $internal_types_files; do
if grep json: "${internal_types_file}" | grep -v // | grep description: ; then if grep json: "${internal_types_file}" | grep -v // | grep description: ; then
echo "Internal API types should not contain descriptions" echo "Internal API types should not contain descriptions"

View File

@@ -23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env kube::golang::setup_env
APIROOTS=${APIROOTS:-pkg/api pkg/expapi} APIROOTS=${APIROOTS:-pkg/api pkg/apis/experimental}
_tmp="${KUBE_ROOT}/_tmp" _tmp="${KUBE_ROOT}/_tmp"
cleanup() { cleanup() {

View File

@@ -25,7 +25,7 @@ kube::golang::setup_env
gendeepcopy=$(kube::util::find-binary "gendeepcopy") gendeepcopy=$(kube::util::find-binary "gendeepcopy")
APIROOTS=${APIROOTS:-pkg/api pkg/expapi} APIROOTS=${APIROOTS:-pkg/api pkg/apis/experimental}
_tmp="${KUBE_ROOT}/_tmp" _tmp="${KUBE_ROOT}/_tmp"
cleanup() { cleanup() {

View File

@@ -70,6 +70,10 @@ if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then
: ${E2E_MIN_STARTUP_PODS:="1"} : ${E2E_MIN_STARTUP_PODS:="1"}
: ${E2E_ZONE:="us-central1-f"} : ${E2E_ZONE:="us-central1-f"}
: ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel : ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel
elif [[ ${JOB_NAME} =~ ^kubernetes-.*-gke ]]; then
KUBERNETES_PROVIDER="gke"
: ${E2E_ZONE:="us-central1-f"}
fi fi
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
@@ -84,8 +88,8 @@ if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
fi fi
fi fi
# Specialized tests which should be skipped by default for projects. # Specialized tests to skip when running reboot tests.
GCE_DEFAULT_SKIP_TESTS=( REBOOT_SKIP_TESTS=(
"Autoscaling\sSuite" "Autoscaling\sSuite"
"Skipped" "Skipped"
"Reboot" "Reboot"
@@ -93,6 +97,20 @@ GCE_DEFAULT_SKIP_TESTS=(
"Example" "Example"
) )
# Specialized tests which should be skipped by default for projects.
GCE_DEFAULT_SKIP_TESTS=(
"${REBOOT_SKIP_TESTS[@]}"
"Reboot")
# Tests which cannot be run on GKE, e.g. because they require
# master ssh access.
GKE_REQUIRED_SKIP_TESTS=(
"Nodes"
"Etcd\sFailure"
"MasterCerts"
"Shell"
)
# The following tests are known to be flaky, and are thus run only in their own # The following tests are known to be flaky, and are thus run only in their own
# -flaky- build variants. # -flaky- build variants.
GCE_FLAKY_TESTS=( GCE_FLAKY_TESTS=(
@@ -131,6 +149,7 @@ GCE_PARALLEL_SKIP_TESTS=(
GCE_PARALLEL_FLAKY_TESTS=( GCE_PARALLEL_FLAKY_TESTS=(
"DaemonRestart" "DaemonRestart"
"Elasticsearch" "Elasticsearch"
"Namespaces.*should\sdelete\sfast"
"PD" "PD"
"ServiceAccounts" "ServiceAccounts"
"Services.*change\sthe\stype" "Services.*change\sthe\stype"
@@ -345,6 +364,81 @@ case ${JOB_NAME} in
: ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"} : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
: ${PROJECT:="k8s-jkns-e2e-gce-release"} : ${PROJECT:="k8s-jkns-e2e-gce-release"}
;; ;;
kubernetes-e2e-gke-prod)
: ${DOGFOOD_GCLOUD:="true"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-prod"}
: ${E2E_NETWORK:="e2e-gke-prod"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${JENKINS_USE_SERVER_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-prod"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-staging)
: ${DOGFOOD_GCLOUD:="true"}
: ${GKE_API_ENDPOINT:="https://staging-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-staging"}
: ${E2E_NETWORK:="e2e-gke-staging"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${JENKINS_USE_SERVER_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-staging"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-test)
: ${DOGFOOD_GCLOUD:="true"}
: ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/rc"}
: ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-test"}
: ${E2E_NETWORK:="e2e-gke-test"}
: ${JENKINS_USE_RELEASE_TARS:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-ci)
: ${DOGFOOD_GCLOUD:="true"}
: ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/staging"}
: ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-ci"}
: ${E2E_NETWORK:="e2e-gke-ci"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
)"}
;;
kubernetes-e2e-gke-ci-reboot)
: ${DOGFOOD_GCLOUD:="true"}
: ${CLOUDSDK_BUCKET:="gs://cloud-sdk-build/testing/staging"}
: ${GKE_API_ENDPOINT:="https://test-container.sandbox.googleapis.com/"}
: ${E2E_CLUSTER_NAME:="jkns-gke-e2e-ci-reboot"}
: ${E2E_NETWORK:="e2e-gke-ci"}
: ${E2E_SET_CLUSTER_API_VERSION:=y}
: ${PROJECT:="k8s-jkns-e2e-gke-ci"}
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
${REBOOT_SKIP_TESTS[@]:+${REBOOT_SKIP_TESTS[@]}} \
${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
)"}
;;
esac esac
# AWS variables # AWS variables
@@ -362,6 +456,13 @@ export KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX:-}
export CLUSTER_NAME=${E2E_CLUSTER_NAME} export CLUSTER_NAME=${E2E_CLUSTER_NAME}
export ZONE=${E2E_ZONE} export ZONE=${E2E_ZONE}
export KUBE_GKE_NETWORK=${E2E_NETWORK} export KUBE_GKE_NETWORK=${E2E_NETWORK}
export E2E_SET_CLUSTER_API_VERSION=${E2E_SET_CLUSTER_API_VERSION:-}
export DOGFOOD_GCLOUD=${DOGFOOD_GCLOUD:-}
export CMD_GROUP=${CMD_GROUP:-}
if [[ ! -z "${GKE_API_ENDPOINT:-}" ]]; then
export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER=${GKE_API_ENDPOINT}
fi
# Shared cluster variables # Shared cluster variables
export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-} export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-}
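Note: CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER is gcloud's environment-variable form of the api_endpoint_overrides/container property, so the export above points every subsequent `gcloud container ...` call at the sandbox GKE endpoint. A minimal sketch of the same override outside Jenkins (the URL is the test endpoint already used above):

export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER="https://test-container.sandbox.googleapis.com/"
gcloud container clusters list  # now talks to the sandbox API instead of production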
@@ -371,6 +472,7 @@ export MINION_SIZE=${MINION_SIZE:-}
export NUM_MINIONS=${NUM_MINIONS:-} export NUM_MINIONS=${NUM_MINIONS:-}
export PROJECT=${PROJECT:-} export PROJECT=${PROJECT:-}
export KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER}
export PATH=${PATH}:/usr/local/go/bin export PATH=${PATH}:/usr/local/go/bin
export KUBE_SKIP_CONFIRMATIONS=y export KUBE_SKIP_CONFIRMATIONS=y
@@ -407,10 +509,13 @@ if [[ "${E2E_UP,,}" == "true" || "${JENKINS_FORCE_GET_TARS:-}" =~ ^[yY]$ ]]; the
# gcloud bug can cause racing component updates to stomp on each # gcloud bug can cause racing component updates to stomp on each
# other. # other.
export KUBE_SKIP_UPDATE=y export KUBE_SKIP_UPDATE=y
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true {
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update preview -q" || true sudo flock -x -n 9
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update alpha -q" || true gcloud components update -q || true
sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update beta -q" || true gcloud components update preview -q || true
gcloud components update alpha -q || true
gcloud components update beta -q || true
} 9>/var/run/lock/gcloud-components.lock
if [[ ! -z ${JENKINS_EXPLICIT_VERSION:-} ]]; then if [[ ! -z ${JENKINS_EXPLICIT_VERSION:-} ]]; then
# Use an explicit pinned version like "ci/v0.10.0-101-g6c814c4" or # Use an explicit pinned version like "ci/v0.10.0-101-g6c814c4" or
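The rewritten update block above swaps four separate lock/unlock cycles for one critical section: the redirection `9>/var/run/lock/gcloud-components.lock` opens the lock file on file descriptor 9 for the whole brace group, `flock -x -n 9` takes an exclusive lock on that descriptor, and the lock is released automatically when the group ends and fd 9 closes. A minimal self-contained sketch of the pattern (lock path and workload are illustrative):

{
  flock -x -n 9 || exit 1      # -n: bail out immediately if another process holds the lock
  echo "serialized work happens here"
} 9>/tmp/example.lock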
@@ -496,6 +601,21 @@ ARTIFACTS=${WORKSPACE}/_artifacts
mkdir -p ${ARTIFACTS} mkdir -p ${ARTIFACTS}
export E2E_REPORT_DIR=${ARTIFACTS} export E2E_REPORT_DIR=${ARTIFACTS}
### Pre Set Up ###
# Install gcloud from a custom path if provided. Used to test GKE with gcloud
# at HEAD or at a release candidate.
if [[ ! -z "${CLOUDSDK_BUCKET:-}" ]]; then
sudo gsutil -m cp -r "${CLOUDSDK_BUCKET}" ~
mv ~/$(basename "${CLOUDSDK_BUCKET}") ~/repo
mkdir ~/cloudsdk
tar zvxf ~/repo/google-cloud-sdk.tar.gz -C ~/cloudsdk
export CLOUDSDK_CORE_DISABLE_PROMPTS=1
export CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL=file://${HOME}/repo/components-2.json
~/cloudsdk/google-cloud-sdk/install.sh --disable-installation-options --bash-completion=false --path-update=false --usage-reporting=false
export PATH=${HOME}/cloudsdk/google-cloud-sdk/bin:${PATH}
export CLOUDSDK_CONFIG=/var/lib/jenkins/.config/gcloud
fi
### Set up ### ### Set up ###
if [[ "${E2E_UP,,}" == "true" ]]; then if [[ "${E2E_UP,,}" == "true" ]]; then
go run ./hack/e2e.go ${E2E_OPT} -v --down go run ./hack/e2e.go ${E2E_OPT} -v --down

View File

@@ -220,4 +220,35 @@ kube::util::analytics-link() {
echo "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/${path}?pixel)]()" echo "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/${path}?pixel)]()"
} }
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: experimental/v1 -> apis/experimental/v1
# * legacy behavior: api/v1 -> api/v1
# * Special handling for only a group: experimental -> apis/experimental
# * Special handling for only "api" group: api -> api
# * Very special handling for "v1": v1 -> api/v1
kube::util::group-version-to-pkg-path() {
local group_version="$1"
# Special cases first.
# TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
# moving the results to pkg/apis/api.
case "${group_version}" in
v1)
echo "api/v1"
;;
api)
echo "api/v1"
;;
api/*)
echo "${group_version}"
;;
*)
echo "apis/${group_version}"
;;
esac
}
# ex: ts=2 sw=2 et filetype=sh # ex: ts=2 sw=2 et filetype=sh
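For quick reference, a hypothetical shell session showing the outputs the comment above promises (assumes hack/lib/util.sh has been sourced):

kube::util::group-version-to-pkg-path "v1"               # -> api/v1
kube::util::group-version-to-pkg-path "api"              # -> api
kube::util::group-version-to-pkg-path "api/v1"           # -> api/v1
kube::util::group-version-to-pkg-path "experimental"     # -> apis/experimental
kube::util::group-version-to-pkg-path "experimental/v1"  # -> apis/experimental/v1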

View File

@@ -86,6 +86,8 @@ API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-"/127.0.0.1(:[0-9]+)?$,/loc
KUBELET_PORT=${KUBELET_PORT:-10250} KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3} LOG_LEVEL=${LOG_LEVEL:-3}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"} CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
RKT_PATH=${RKT_PATH:-""}
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0} CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
function test_apiserver_off { function test_apiserver_off {
@@ -251,6 +253,8 @@ function start_kubelet {
--v=${LOG_LEVEL} \ --v=${LOG_LEVEL} \
--chaos-chance="${CHAOS_CHANCE}" \ --chaos-chance="${CHAOS_CHANCE}" \
--container-runtime="${CONTAINER_RUNTIME}" \ --container-runtime="${CONTAINER_RUNTIME}" \
--rkt-path="${RKT_PATH}" \
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
--hostname-override="127.0.0.1" \ --hostname-override="127.0.0.1" \
--address="127.0.0.1" \ --address="127.0.0.1" \
--api-servers="${API_HOST}:${API_PORT}" \ --api-servers="${API_HOST}:${API_PORT}" \

View File

@@ -711,19 +711,19 @@ __EOF__
### Create and delete persistent volume examples ### Create and delete persistent volume examples
# Pre-condition: no persistent volumes currently exist # Pre-condition: no persistent volumes currently exist
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" '' kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
# Command # Command
kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0001:' kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
kubectl delete pv pv0001 "${kube_flags[@]}" kubectl delete pv pv0001 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0002:' kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
kubectl delete pv pv0002 "${kube_flags[@]}" kubectl delete pv pv0002 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" 'pv0003:' kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
kubectl delete pv pv0003 "${kube_flags[@]}" kubectl delete pv pv0003 "${kube_flags[@]}"
# Post-condition: no PVs # Post-condition: no PVs
kube::test::get_object_assert pv "{{range.items}}{{.$id_field}}:{{end}}" '' kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
############################ ############################
# Persistent Volume Claims # # Persistent Volume Claims #
@@ -731,21 +731,21 @@ __EOF__
### Create and delete persistent volume claim examples ### Create and delete persistent volume claim examples
# Pre-condition: no persistent volume claims currently exist # Pre-condition: no persistent volume claims currently exist
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" '' kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command # Command
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-1:' kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
kubectl delete pvc myclaim-1 "${kube_flags[@]}" kubectl delete pvc myclaim-1 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-2:' kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
kubectl delete pvc myclaim-2 "${kube_flags[@]}" kubectl delete pvc myclaim-2 "${kube_flags[@]}"
kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}" kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" 'myclaim-3:' kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
kubectl delete pvc myclaim-3 "${kube_flags[@]}" kubectl delete pvc myclaim-3 "${kube_flags[@]}"
# Post-condition: no PVCs # Post-condition: no PVCs
kube::test::get_object_assert pvc "{{range.items}}{{.$id_field}}:{{end}}" '' kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''

View File

@@ -24,14 +24,14 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env kube::golang::setup_env
function generate_version() { function generate_version() {
local groupVersion=$1 local group_version=$1
local TMPFILE="/tmp/types_swagger_doc_generated.$(date +%s).go" local TMPFILE="/tmp/types_swagger_doc_generated.$(date +%s).go"
echo "Generating swagger type docs for ${groupVersion}" echo "Generating swagger type docs for ${group_version}"
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE
echo "package ${groupVersion##*/}" >> $TMPFILE echo "package ${group_version##*/}" >> $TMPFILE
cat >> $TMPFILE <<EOF cat >> $TMPFILE <<EOF
// This file contains a collection of methods that can be used from go-restful to // This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more // generate Swagger API documentation for its models. Please read this PR for more
@@ -46,21 +46,23 @@ function generate_version() {
// AUTO-GENERATED FUNCTIONS START HERE // AUTO-GENERATED FUNCTIONS START HERE
EOF EOF
GOPATH=$(godep path):$GOPATH go run cmd/genswaggertypedocs/swagger_type_docs.go -s "pkg/${groupVersion}/types.go" -f - >> $TMPFILE GOPATH=$(godep path):$GOPATH go run cmd/genswaggertypedocs/swagger_type_docs.go -s \
"pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types.go" -f - \
>> $TMPFILE
echo "// AUTO-GENERATED FUNCTIONS END HERE" >> $TMPFILE echo "// AUTO-GENERATED FUNCTIONS END HERE" >> $TMPFILE
gofmt -w -s $TMPFILE gofmt -w -s $TMPFILE
mv $TMPFILE "pkg/${groupVersion}/types_swagger_doc_generated.go" mv $TMPFILE "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go"
} }
GROUP_VERSIONS="api/v1 expapi/v1" GROUP_VERSIONS="api/v1 experimental/v1"
# To avoid compile errors, remove the currently existing files. # To avoid compile errors, remove the currently existing files.
for groupVersion in $GROUP_VERSIONS; do for group_version in $GROUP_VERSIONS; do
rm -f "pkg/${groupVersion}/types_swagger_doc_generated.go" rm -f "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go"
done done
for groupVersion in $GROUP_VERSIONS; do for group_version in $GROUP_VERSIONS; do
generate_version "${groupVersion}" generate_version "${group_version}"
done done
"${KUBE_ROOT}/hack/update-swagger-spec.sh" "${KUBE_ROOT}/hack/update-swagger-spec.sh"

hack/update-gofmt.sh Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GoFmt apparently is changing @ head...
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5') ]]; then
echo "Unknown go version '${GO_VERSION}', skipping gofmt."
exit 0
fi
cd "${KUBE_ROOT}"
find_files() {
find . -not \( \
\( \
-wholename './output' \
-o -wholename './_output' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename '*/third_party/*' \
-o -wholename '*/Godeps/*' \
\) -prune \
\) -name '*.go'
}
GOFMT="gofmt -s -w"
find_files | xargs $GOFMT
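Since the script's only action is `gofmt -s -w`, note that `-s` applies code simplifications on top of plain formatting. A quick illustrative check (temporary path is arbitrary):

cat > /tmp/simplify.go <<'EOF'
package main

func count(s []int) (n int) {
	for i, _ := range s { // gofmt -s rewrites this to: for i := range s
		n += i
	}
	return
}
EOF
gofmt -s -d /tmp/simplify.go  # -d prints the simplification as a diff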

View File

@@ -1,4 +1,3 @@
cluster/addons/cluster-monitoring/README.md:Heapster enables monitoring of Kubernetes Clusters using [cAdvisor](https://github.com/google/cadvisor). The kubelet will communicate with an instance of cAdvisor running on localhost and proxy container stats to Heapster. Kubelet will attempt to connect to cAdvisor on port 4194 by default but this port can be configured with kubelet's `--cadvisor-port` run flag. Detailed information about heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy
cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"] cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"]
cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full EC2 private dns name
@@ -39,7 +38,7 @@ cluster/saltbase/salt/kube-addons/kube-addons.sh:# Create admission_control obje
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %} cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file + " " + min_request_timeout -%}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%} cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure-port=" + secure_port + " " + token_auth_file + " " + bind_address + " " + pillar['log_level'] + " " + advertise_address + " " + proxy_ssh_options -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%} cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + enable_horizontal_pod_autoscaler + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%} cluster/saltbase/salt/kube-proxy/default: {% set api_servers_with_port = api_servers + ":7080" -%}

View File

@@ -1,274 +1,277 @@
accept-hosts accept-hosts
accept-paths accept-paths
account-for-pod-resources account-for-pod-resources
admission-control admission-control
admission-control-config-file admission-control-config-file
advertise-address advertise-address
advertised-address advertised-address
algorithm-provider algorithm-provider
all-namespaces all-namespaces
allocate-node-cidrs allocate-node-cidrs
allow-privileged allow-privileged
api-burst api-burst
api-prefix api-prefix
api-rate api-rate
api-servers api-servers
api-token api-token
api-version api-version
authorization-mode authorization-mode
authorization-policy-file authorization-policy-file
auth-path auth-path
basic-auth-file basic-auth-file
bench-pods bench-pods
bench-quiet bench-quiet
bench-tasks bench-tasks
bench-workers bench-workers
bind-address bind-address
bind-pods-burst bind-pods-burst
bind-pods-qps bind-pods-qps
cadvisor-port cadvisor-port
cert-dir cert-dir
certificate-authority certificate-authority
cgroup-root cgroup-root
chaos-chance chaos-chance
cleanup-iptables cleanup-iptables
client-ca-file client-ca-file
client-certificate client-certificate
client-key client-key
cloud-config cloud-config
cloud-provider cloud-provider
cluster-cidr cluster-cidr
cluster-dns cluster-dns
cluster-domain cluster-domain
cluster-name cluster-name
cluster-tag cluster-tag
concurrent-endpoint-syncs concurrent-endpoint-syncs
configure-cbr0 configure-cbr0
contain-pod-resources contain-pod-resources
container-port container-port
container-runtime container-runtime
cors-allowed-origins cors-allowed-origins
create-external-load-balancer create-external-load-balancer
current-release-pr current-release-pr
current-replicas current-replicas
default-container-cpu-limit default-container-cpu-limit
default-container-mem-limit default-container-mem-limit
delay-shutdown delay-shutdown
deleting-pods-burst deleting-pods-burst
deleting-pods-qps deleting-pods-qps
deployment-label-key deployment-label-key
dest-file dest-file
disable-filter disable-filter
docker-endpoint docker-endpoint
docker-exec-handler docker-exec-handler
dockercfg-path dockercfg-path
driver-port driver-port
dry-run dry-run
duration-sec duration-sec
e2e-output-dir e2e-output-dir
enable-debugging-handlers enable-debugging-handlers
enable-horizontal-pod-autoscaler enable-horizontal-pod-autoscaler
enable-server enable-server
etcd-config etcd-config
etcd-prefix etcd-prefix
etcd-server etcd-server
etcd-servers etcd-servers
event-burst event-burst
event-qps event-qps
event-ttl event-ttl
executor-bindall executor-bindall
executor-logv executor-logv
executor-path executor-path
executor-suicide-timeout executor-suicide-timeout
experimental-keystone-url experimental-keystone-url
experimental-prefix experimental-prefix
external-hostname external-hostname
external-ip external-ip
failover-timeout failover-timeout
file-check-frequency file-check-frequency
file-suffix file-suffix
forward-services forward-services
framework-name framework-name
framework-weburi framework-weburi
func-dest func-dest
fuzz-iters fuzz-iters
gce-project gce-project
gce-zone gce-zone
gke-cluster gke-cluster
google-json-key google-json-key
grace-period grace-period
ha-domain ha-domain
healthz-bind-address healthz-bind-address
healthz-port healthz-port
horizontal-pod-autoscaler-sync-period horizontal-pod-autoscaler-sync-period
hostname-override hostname-override
host-network-sources host-network-sources
http-check-frequency http-check-frequency
http-port http-port
ignore-not-found ignore-not-found
image-gc-high-threshold image-gc-high-threshold
image-gc-low-threshold image-gc-low-threshold
insecure-bind-address insecure-bind-address
insecure-port insecure-port
insecure-skip-tls-verify insecure-skip-tls-verify
iptables-sync-period iptables-sync-period
ir-data-source ir-data-source
ir-dbname ir-dbname
ir-influxdb-host ir-influxdb-host
ir-password ir-password
ir-user ir-user
jenkins-host jenkins-host
jenkins-jobs jenkins-jobs
km-path km-path
kubectl-path kubectl-path
kubelet-cadvisor-port kubelet-cadvisor-port
kubelet-certificate-authority kubelet-certificate-authority
kubelet-client-certificate kubelet-client-certificate
kubelet-client-key kubelet-client-key
kubelet-docker-endpoint kubelet-docker-endpoint
kubelet-host-network-sources kubelet-host-network-sources
kubelet-https kubelet-https
kubelet-network-plugin kubelet-network-plugin
kubelet-pod-infra-container-image kubelet-pod-infra-container-image
kubelet-port kubelet-port
kubelet-root-dir kubelet-root-dir
kubelet-sync-frequency kubelet-sync-frequency
kubelet-timeout kubelet-timeout
kube-master kube-master
label-columns label-columns
last-release-pr last-release-pr
legacy-userspace-proxy legacy-userspace-proxy
load-balancer-ip
log-flush-frequency log-flush-frequency
long-running-request-regexp long-running-request-regexp
low-diskspace-threshold-mb low-diskspace-threshold-mb
manifest-url manifest-url
manifest-url-header manifest-url-header
masquerade-all masquerade-all
master-service-namespace master-service-namespace
max-concurrency max-concurrency
max-connection-bytes-per-sec max-connection-bytes-per-sec
maximum-dead-containers maximum-dead-containers
maximum-dead-containers-per-container maximum-dead-containers-per-container
max-log-age max-log-age
max-log-backups max-log-backups
max-log-size max-log-size
max-outgoing-burst max-outgoing-burst
max-outgoing-qps max-outgoing-qps
max-pods max-pods
max-requests-inflight max-requests-inflight
mesos-authentication-principal mesos-authentication-principal
mesos-authentication-provider mesos-authentication-provider
mesos-authentication-secret-file mesos-authentication-secret-file
mesos-cgroup-prefix mesos-cgroup-prefix
mesos-executor-cpus mesos-executor-cpus
mesos-executor-mem mesos-executor-mem
mesos-master mesos-master
mesos-role mesos-role
mesos-user mesos-user
minimum-container-ttl-duration minimum-container-ttl-duration
minion-max-log-age minion-max-log-age
minion-max-log-backups minion-max-log-backups
minion-max-log-size minion-max-log-size
minion-path-override minion-path-override
min-pr-number min-pr-number
min-request-timeout min-request-timeout
namespace-sync-period namespace-sync-period
network-plugin network-plugin
network-plugin-dir network-plugin-dir
node-instance-group node-instance-group
node-monitor-grace-period node-monitor-grace-period
node-monitor-period node-monitor-period
node-startup-grace-period node-startup-grace-period
node-status-update-frequency node-status-update-frequency
node-sync-period node-sync-period
no-headers no-headers
num-nodes num-nodes
oidc-ca-file oidc-ca-file
oidc-client-id oidc-client-id
oidc-issuer-url oidc-issuer-url
oidc-username-claim oidc-username-claim
oom-score-adj oom-score-adj
output-version output-version
out-version out-version
path-override path-override
pod-cidr pod-cidr
pod-eviction-timeout pod-eviction-timeout
pod-infra-container-image pod-infra-container-image
pod-running pod-running
policy-config-file policy-config-file
poll-interval poll-interval
portal-net portal-net
private-mountns private-mountns
prom-push-gateway prom-push-gateway
proxy-bindall proxy-bindall
proxy-logv proxy-logv
proxy-port-range proxy-port-range
public-address-override public-address-override
pvclaimbinder-sync-period pvclaimbinder-sync-period
read-only-port read-only-port
really-crash-for-testing really-crash-for-testing
reconcile-cooldown reconcile-cooldown
reconcile-interval reconcile-interval
register-node register-node
register-retry-count register-retry-count
registry-burst registry-burst
registry-qps registry-qps
reject-methods reject-methods
reject-paths reject-paths
repo-root repo-root
report-dir report-dir
required-contexts required-contexts
resolv-conf resolv-conf
resource-container resource-container
resource-quota-sync-period resource-quota-sync-period
resource-version resource-version
rkt-path rkt-path
rkt-stage1-image
root-ca-file root-ca-file
root-dir root-dir
run-proxy run-proxy
runtime-config runtime-config
scheduler-config scheduler-config
schema-cache-dir
secure-port secure-port
service-account-key-file service-account-key-file
service-account-lookup service-account-lookup
service-account-private-key-file service-account-private-key-file
service-address service-address
service-cluster-ip-range service-cluster-ip-range
service-node-port-range service-node-port-range
service-node-ports service-node-ports
service-sync-period service-sync-period
session-affinity session-affinity
show-all show-all
shutdown-fd shutdown-fd
shutdown-fifo shutdown-fifo
skip-munges skip-munges
sort-by sort-by
source-file source-file
ssh-keyfile ssh-keyfile
ssh-user ssh-user
static-pods-config static-pods-config
stats-port stats-port
storage-version storage-version
streaming-connection-idle-timeout streaming-connection-idle-timeout
suicide-timeout suicide-timeout
sync-frequency sync-frequency
system-container system-container
target-port target-port
tcp-services tcp-services
tls-cert-file tls-cert-file
tls-private-key-file tls-private-key-file
token-auth-file token-auth-file
ttl-secs ttl-secs
type-src type-src
unix-socket unix-socket
update-period update-period
upgrade-target upgrade-target
use-kubernetes-cluster-service use-kubernetes-cluster-service
user-whitelist user-whitelist
watch-cache watch-cache
watch-only watch-only
whitelist-override-label whitelist-override-label
www-prefix www-prefix
retry_time retry_time
file_content_in_loop file_content_in_loop
cpu-cfs-quota cpu-cfs-quota

View File

@@ -302,6 +302,7 @@ func deepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *Con
func deepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { func deepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
out.Reason = in.Reason out.Reason = in.Reason
out.Message = in.Message
return nil return nil
} }
@@ -1958,6 +1959,7 @@ func deepCopy_api_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cl
} }
func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error { func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error {
out.Type = in.Type
if in.Ports != nil { if in.Ports != nil {
out.Ports = make([]ServicePort, len(in.Ports)) out.Ports = make([]ServicePort, len(in.Ports))
for i := range in.Ports { for i := range in.Ports {
@@ -1977,7 +1979,6 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl
out.Selector = nil out.Selector = nil
} }
out.ClusterIP = in.ClusterIP out.ClusterIP = in.ClusterIP
out.Type = in.Type
if in.ExternalIPs != nil { if in.ExternalIPs != nil {
out.ExternalIPs = make([]string, len(in.ExternalIPs)) out.ExternalIPs = make([]string, len(in.ExternalIPs))
for i := range in.ExternalIPs { for i := range in.ExternalIPs {
@@ -1986,6 +1987,7 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl
} else { } else {
out.ExternalIPs = nil out.ExternalIPs = nil
} }
out.LoadBalancerIP = in.LoadBalancerIP
out.SessionAffinity = in.SessionAffinity out.SessionAffinity = in.SessionAffinity
return nil return nil
} }

View File

@@ -33,8 +33,8 @@ import (
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
_ "k8s.io/kubernetes/pkg/expapi" _ "k8s.io/kubernetes/pkg/apis/experimental"
_ "k8s.io/kubernetes/pkg/expapi/v1" _ "k8s.io/kubernetes/pkg/apis/experimental/v1"
flag "github.com/spf13/pflag" flag "github.com/spf13/pflag"
) )

View File

@@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
apiutil "k8s.io/kubernetes/pkg/api/util" apiutil "k8s.io/kubernetes/pkg/api/util"
explatest "k8s.io/kubernetes/pkg/expapi/latest" explatest "k8s.io/kubernetes/pkg/apis/experimental/latest"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
) )

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/api/registered"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/apis/experimental"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -121,15 +121,15 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
c.FuzzNoCustom(j) // fuzz self without calling this function again c.FuzzNoCustom(j) // fuzz self without calling this function again
//j.TemplateRef = nil // this is required for round trip //j.TemplateRef = nil // this is required for round trip
}, },
func(j *expapi.DeploymentStrategy, c fuzz.Continue) { func(j *experimental.DeploymentStrategy, c fuzz.Continue) {
c.FuzzNoCustom(j) // fuzz self without calling this function again c.FuzzNoCustom(j) // fuzz self without calling this function again
// Ensure that strategyType is one of valid values. // Ensure that strategyType is one of valid values.
strategyTypes := []expapi.DeploymentType{expapi.DeploymentRecreate, expapi.DeploymentRollingUpdate} strategyTypes := []experimental.DeploymentType{experimental.DeploymentRecreate, experimental.DeploymentRollingUpdate}
j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))] j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
if j.Type != expapi.DeploymentRollingUpdate { if j.Type != experimental.DeploymentRollingUpdate {
j.RollingUpdate = nil j.RollingUpdate = nil
} else { } else {
rollingUpdate := expapi.RollingUpdateDeployment{} rollingUpdate := experimental.RollingUpdateDeployment{}
if c.RandBool() { if c.RandBool() {
rollingUpdate.MaxUnavailable = util.NewIntOrStringFromInt(int(c.RandUint64())) rollingUpdate.MaxUnavailable = util.NewIntOrStringFromInt(int(c.RandUint64()))
rollingUpdate.MaxSurge = util.NewIntOrStringFromInt(int(c.RandUint64())) rollingUpdate.MaxSurge = util.NewIntOrStringFromInt(int(c.RandUint64()))
@@ -351,7 +351,7 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
c.FuzzNoCustom(n) c.FuzzNoCustom(n)
n.Spec.ExternalID = "external" n.Spec.ExternalID = "external"
}, },
func(s *expapi.APIVersion, c fuzz.Continue) { func(s *experimental.APIVersion, c fuzz.Continue) {
// We can't use c.RandString() here because it may generate empty // We can't use c.RandString() here because it may generate empty
// string, which will cause tests failure. // string, which will cause tests failure.
s.APIGroup = "something" s.APIGroup = "something"

View File

@@ -835,8 +835,10 @@ const (
) )
type ContainerStateWaiting struct { type ContainerStateWaiting struct {
// Reason could be pulling image, // A brief CamelCase string indicating details about why the container is in waiting state.
Reason string `json:"reason,omitempty"` Reason string `json:"reason,omitempty"`
// A human-readable message indicating details about why the container is in waiting state.
Message string `json:"message,omitempty"`
} }
type ContainerStateRunning struct { type ContainerStateRunning struct {
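With the added field, kubelet can report a terse machine-readable Reason alongside a human-readable Message. A hedged sketch of reading both back via kubectl's template output (pod name and container index are illustrative):

kubectl get pod mypod -o template \
  --template='{{(index .status.containerStatuses 0).state.waiting.reason}}'
kubectl get pod mypod -o template \
  --template='{{(index .status.containerStatuses 0).state.waiting.message}}'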
@@ -1185,6 +1187,9 @@ type LoadBalancerIngress struct {
// ServiceSpec describes the attributes that a user creates on a service // ServiceSpec describes the attributes that a user creates on a service
type ServiceSpec struct { type ServiceSpec struct {
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty"`
// Required: The list of ports that are exposed by this service. // Required: The list of ports that are exposed by this service.
Ports []ServicePort `json:"ports"` Ports []ServicePort `json:"ports"`
@@ -1200,13 +1205,17 @@ type ServiceSpec struct {
// None can be specified for headless services when proxying is not required // None can be specified for headless services when proxying is not required
ClusterIP string `json:"clusterIP,omitempty"` ClusterIP string `json:"clusterIP,omitempty"`
// Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
Type ServiceType `json:"type,omitempty"`
// ExternalIPs are used by external load balancers, or can be set by // ExternalIPs are used by external load balancers, or can be set by
// users to handle external traffic that arrives at a node. // users to handle external traffic that arrives at a node.
ExternalIPs []string `json:"externalIPs,omitempty"` ExternalIPs []string `json:"externalIPs,omitempty"`
// Only applies to Service Type: LoadBalancer
// LoadBalancer will get created with the IP specified in this field.
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
// Required: Supports "ClientIP" and "None". Used to maintain session affinity. // Required: Supports "ClientIP" and "None". Used to maintain session affinity.
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"` SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
} }
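Putting the new ServiceSpec field to use: a LoadBalancer service can now ask for a specific external IP, which is honored only when the cloud provider supports user-specified load-balancer IPs. A minimal sketch (service name, selector, and IP are illustrative):

kubectl create -f - <<__EOF__
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {"name": "my-lb-service"},
  "spec": {
    "type": "LoadBalancer",
    "loadBalancerIP": "104.198.0.10",
    "ports": [{"port": 80, "targetPort": 8080}],
    "selector": {"app": "my-app"}
  }
}
__EOF__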

View File

@@ -340,6 +340,7 @@ func convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.Conta
defaulting.(func(*api.ContainerStateWaiting))(in) defaulting.(func(*api.ContainerStateWaiting))(in)
} }
out.Reason = in.Reason out.Reason = in.Reason
out.Message = in.Message
return nil return nil
} }
@@ -2172,6 +2173,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ServiceSpec))(in) defaulting.(func(*api.ServiceSpec))(in)
} }
out.Type = ServiceType(in.Type)
if in.Ports != nil { if in.Ports != nil {
out.Ports = make([]ServicePort, len(in.Ports)) out.Ports = make([]ServicePort, len(in.Ports))
for i := range in.Ports { for i := range in.Ports {
@@ -2191,7 +2193,6 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
out.Selector = nil out.Selector = nil
} }
out.ClusterIP = in.ClusterIP out.ClusterIP = in.ClusterIP
out.Type = ServiceType(in.Type)
if in.ExternalIPs != nil { if in.ExternalIPs != nil {
out.ExternalIPs = make([]string, len(in.ExternalIPs)) out.ExternalIPs = make([]string, len(in.ExternalIPs))
for i := range in.ExternalIPs { for i := range in.ExternalIPs {
@@ -2200,6 +2201,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service
} else { } else {
out.ExternalIPs = nil out.ExternalIPs = nil
} }
out.LoadBalancerIP = in.LoadBalancerIP
out.SessionAffinity = ServiceAffinity(in.SessionAffinity) out.SessionAffinity = ServiceAffinity(in.SessionAffinity)
return nil return nil
} }
@@ -2742,6 +2744,7 @@ func convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *Container
defaulting.(func(*ContainerStateWaiting))(in) defaulting.(func(*ContainerStateWaiting))(in)
} }
out.Reason = in.Reason out.Reason = in.Reason
out.Message = in.Message
return nil return nil
} }
@@ -4603,6 +4606,7 @@ func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service
out.ExternalIPs = nil out.ExternalIPs = nil
} }
out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity)
out.LoadBalancerIP = in.LoadBalancerIP
return nil return nil
} }

View File

@@ -317,6 +317,7 @@ func deepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *Cont
func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { func deepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error {
out.Reason = in.Reason out.Reason = in.Reason
out.Message = in.Message
return nil return nil
} }
@@ -1992,6 +1993,7 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo
out.ExternalIPs = nil out.ExternalIPs = nil
} }
out.SessionAffinity = in.SessionAffinity out.SessionAffinity = in.SessionAffinity
out.LoadBalancerIP = in.LoadBalancerIP
return nil return nil
} }

View File

@@ -1036,8 +1036,10 @@ const (
// ContainerStateWaiting is a waiting state of a container. // ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct { type ContainerStateWaiting struct {
// (brief) reason the container is not yet running, such as pulling its image. // (brief) reason the container is not yet running.
Reason string `json:"reason,omitempty"` Reason string `json:"reason,omitempty"`
// Message regarding why the container is not yet running.
Message string `json:"message,omitempty"`
} }
// ContainerStateRunning is a running state of a container. // ContainerStateRunning is a running state of a container.
@@ -1509,6 +1511,13 @@ type ServiceSpec struct {
// Defaults to None. // Defaults to None.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"` SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
// Only applies to Service Type: LoadBalancer
// LoadBalancer will get created with the IP specified in this field.
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
} }
// ServicePort contains information on service's port. // ServicePort contains information on service's port.

View File

@@ -199,8 +199,9 @@ func (ContainerStateTerminated) SwaggerDoc() map[string]string {
} }
var map_ContainerStateWaiting = map[string]string{ var map_ContainerStateWaiting = map[string]string{
"": "ContainerStateWaiting is a waiting state of a container.", "": "ContainerStateWaiting is a waiting state of a container.",
"reason": "(brief) reason the container is not yet running, such as pulling its image.", "reason": "(brief) reason the container is not yet running.",
"message": "Message regarding why the container is not yet running.",
} }
func (ContainerStateWaiting) SwaggerDoc() map[string]string { func (ContainerStateWaiting) SwaggerDoc() map[string]string {
@@ -1272,6 +1273,7 @@ var map_ServiceSpec = map[string]string{
"type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services", "type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#external-services",
"externalIPs": "ExternalIPs are used by external load balancers, or can be set by users to handle external traffic that arrives at a node. Externally visible IPs (e.g. load balancers) that should be proxied to this service.", "externalIPs": "ExternalIPs are used by external load balancers, or can be set by users to handle external traffic that arrives at a node. Externally visible IPs (e.g. load balancers) that should be proxied to this service.",
"sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies",
"loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
} }
func (ServiceSpec) SwaggerDoc() map[string]string { func (ServiceSpec) SwaggerDoc() map[string]string {

View File

@@ -1458,16 +1458,27 @@ func ValidateLimitRange(limitRange *api.LimitRange) errs.ValidationErrorList {
keys.Insert(string(k)) keys.Insert(string(k))
min[string(k)] = q min[string(k)] = q
} }
for k, q := range limit.Default {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].default[%s]", i, k))...) if limit.Type == api.LimitTypePod {
keys.Insert(string(k)) if len(limit.Default) > 0 {
defaults[string(k)] = q allErrs = append(allErrs, errs.NewFieldInvalid("spec.limits[%d].default", limit.Default, "Default is not supported when limit type is Pod"))
} }
for k, q := range limit.DefaultRequest { if len(limit.DefaultRequest) > 0 {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k))...) allErrs = append(allErrs, errs.NewFieldInvalid("spec.limits[%d].defaultRequest", limit.DefaultRequest, "DefaultRequest is not supported when limit type is Pod"))
keys.Insert(string(k)) }
defaultRequests[string(k)] = q } else {
for k, q := range limit.Default {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].default[%s]", i, k))...)
keys.Insert(string(k))
defaults[string(k)] = q
}
for k, q := range limit.DefaultRequest {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k))...)
keys.Insert(string(k))
defaultRequests[string(k)] = q
}
} }
for k := range limit.MaxLimitRequestRatio { for k := range limit.MaxLimitRequestRatio {
allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].maxLimitRequestRatio[%s]", i, k))...) allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].maxLimitRequestRatio[%s]", i, k))...)
} }
@@ -1479,38 +1490,26 @@ func ValidateLimitRange(limitRange *api.LimitRange) errs.ValidationErrorList {
defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k]
if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 {
minQuantity := limit.Min[api.ResourceName(k)]
maxQuantity := limit.Max[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].min[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].min[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String())))
} }
if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 {
minQuantity := limit.Min[api.ResourceName(k)]
defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String())))
} }
if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 {
maxQuantity := limit.Max[api.ResourceName(k)]
defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String())))
} }
if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 {
defaultQuantity := limit.Default[api.ResourceName(k)]
defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String())))
} }
if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 {
minQuantity := limit.Min[api.ResourceName(k)]
defaultQuantity := limit.Default[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String())))
} }
if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 {
maxQuantity := limit.Max[api.ResourceName(k)]
defaultQuantity := limit.Default[api.ResourceName(k)]
allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String())))
} }
} }
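Net effect of the new branch: Pod-scoped limits may still set Min, Max, and MaxLimitRequestRatio, but Default and DefaultRequest are per-container concepts and are now rejected for Type: Pod. A sketch of a LimitRange the new validation refuses (name and values are illustrative):

kubectl create -f - <<__EOF__
{
  "kind": "LimitRange",
  "apiVersion": "v1",
  "metadata": {"name": "bad-pod-defaults"},
  "spec": {
    "limits": [{
      "type": "Pod",
      "max": {"cpu": "100m"},
      "default": {"cpu": "50m"}
    }]
  }
}
__EOF__
# expected to fail with: Default is not supported when limit type is Pod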

View File

@@ -2911,6 +2911,12 @@ func TestValidateLimitRange(t *testing.T) {
Type: api.LimitTypePod, Type: api.LimitTypePod,
Max: getResourceList("100m", "10000Mi"), Max: getResourceList("100m", "10000Mi"),
Min: getResourceList("5m", "100Mi"), Min: getResourceList("5m", "100Mi"),
MaxLimitRequestRatio: getResourceList("10", ""),
},
{
Type: api.LimitTypeContainer,
Max: getResourceList("100m", "10000Mi"),
Min: getResourceList("5m", "100Mi"),
Default: getResourceList("50m", "500Mi"), Default: getResourceList("50m", "500Mi"),
DefaultRequest: getResourceList("10m", "200Mi"), DefaultRequest: getResourceList("10m", "200Mi"),
MaxLimitRequestRatio: getResourceList("10", ""), MaxLimitRequestRatio: getResourceList("10", ""),
@@ -2923,7 +2929,7 @@ func TestValidateLimitRange(t *testing.T) {
spec: api.LimitRangeSpec{ spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{ Limits: []api.LimitRangeItem{
{ {
Type: api.LimitTypePod, Type: api.LimitTypeContainer,
Max: getResourceList("100m", "10000T"), Max: getResourceList("100m", "10000T"),
Min: getResourceList("5m", "100Mi"), Min: getResourceList("5m", "100Mi"),
Default: getResourceList("50m", "500Mi"), Default: getResourceList("50m", "500Mi"),
@@ -2978,6 +2984,32 @@ func TestValidateLimitRange(t *testing.T) {
}}, }},
"", "",
}, },
"default-limit-type-pod": {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
{
Type: api.LimitTypePod,
Max: getResourceList("100m", "10000m"),
Min: getResourceList("0m", "100m"),
Default: getResourceList("10m", "100m"),
},
},
}},
"Default is not supported when limit type is Pod",
},
"default-request-limit-type-pod": {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{
{
Type: api.LimitTypePod,
Max: getResourceList("100m", "10000m"),
Min: getResourceList("0m", "100m"),
DefaultRequest: getResourceList("10m", "100m"),
},
},
}},
"DefaultRequest is not supported when limit type is Pod",
},
"min value 100m is greater than max value 10m": { "min value 100m is greater than max value 10m": {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{ Limits: []api.LimitRangeItem{
@@ -2994,7 +3026,7 @@ func TestValidateLimitRange(t *testing.T) {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{ Limits: []api.LimitRangeItem{
{ {
Type: api.LimitTypePod, Type: api.LimitTypeContainer,
Max: getResourceList("1", ""), Max: getResourceList("1", ""),
Min: getResourceList("100m", ""), Min: getResourceList("100m", ""),
Default: getResourceList("2000m", ""), Default: getResourceList("2000m", ""),
@@ -3007,7 +3039,7 @@ func TestValidateLimitRange(t *testing.T) {
api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{
Limits: []api.LimitRangeItem{ Limits: []api.LimitRangeItem{
{ {
Type: api.LimitTypePod, Type: api.LimitTypeContainer,
Max: getResourceList("1", ""), Max: getResourceList("1", ""),
Min: getResourceList("100m", ""), Min: getResourceList("100m", ""),
DefaultRequest: getResourceList("2000m", ""), DefaultRequest: getResourceList("2000m", ""),

View File

@@ -16,7 +16,7 @@ limitations under the License.
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. // DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package expapi package experimental
import ( import (
time "time" time "time"
@@ -757,29 +757,29 @@ func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c
return nil return nil
} }
func deepCopy_expapi_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { func deepCopy_experimental_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
out.Name = in.Name out.Name = in.Name
out.APIGroup = in.APIGroup out.APIGroup = in.APIGroup
return nil return nil
} }
func deepCopy_expapi_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { func deepCopy_experimental_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_expapi_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_experimental_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_expapi_DaemonSetStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_experimental_DaemonSetStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { func deepCopy_experimental_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@@ -789,7 +789,7 @@ func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conv
if in.Items != nil { if in.Items != nil {
out.Items = make([]DaemonSet, len(in.Items)) out.Items = make([]DaemonSet, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_expapi_DaemonSet(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_experimental_DaemonSet(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@@ -799,7 +799,7 @@ func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conv
return nil return nil
} }
func deepCopy_expapi_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { func deepCopy_experimental_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error {
if in.Selector != nil { if in.Selector != nil {
out.Selector = make(map[string]string) out.Selector = make(map[string]string)
for key, val := range in.Selector { for key, val := range in.Selector {
@@ -819,30 +819,30 @@ func deepCopy_expapi_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conv
return nil return nil
} }
func deepCopy_expapi_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { func deepCopy_experimental_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled
return nil return nil
} }
func deepCopy_expapi_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { func deepCopy_experimental_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_expapi_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_experimental_DeploymentSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_expapi_DeploymentStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_experimental_DeploymentStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { func deepCopy_experimental_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@@ -852,7 +852,7 @@ func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *c
if in.Items != nil { if in.Items != nil {
out.Items = make([]Deployment, len(in.Items)) out.Items = make([]Deployment, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_expapi_Deployment(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_experimental_Deployment(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@@ -862,7 +862,7 @@ func deepCopy_expapi_DeploymentList(in DeploymentList, out *DeploymentList, c *c
return nil return nil
} }
func deepCopy_expapi_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { func deepCopy_experimental_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error {
out.Replicas = in.Replicas out.Replicas = in.Replicas
if in.Selector != nil { if in.Selector != nil {
out.Selector = make(map[string]string) out.Selector = make(map[string]string)
@@ -880,24 +880,24 @@ func deepCopy_expapi_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *c
} else { } else {
out.Template = nil out.Template = nil
} }
if err := deepCopy_expapi_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { if err := deepCopy_experimental_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
return err return err
} }
out.UniqueLabelKey = in.UniqueLabelKey out.UniqueLabelKey = in.UniqueLabelKey
return nil return nil
} }
func deepCopy_expapi_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { func deepCopy_experimental_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error {
out.Replicas = in.Replicas out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas out.UpdatedReplicas = in.UpdatedReplicas
return nil return nil
} }
func deepCopy_expapi_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { func deepCopy_experimental_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
out.Type = in.Type out.Type = in.Type
if in.RollingUpdate != nil { if in.RollingUpdate != nil {
out.RollingUpdate = new(RollingUpdateDeployment) out.RollingUpdate = new(RollingUpdateDeployment)
if err := deepCopy_expapi_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil { if err := deepCopy_experimental_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil {
return err return err
} }
} else { } else {
@@ -906,19 +906,19 @@ func deepCopy_expapi_DeploymentStrategy(in DeploymentStrategy, out *DeploymentSt
return nil return nil
} }
func deepCopy_expapi_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { func deepCopy_experimental_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_expapi_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_experimental_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if in.Status != nil { if in.Status != nil {
out.Status = new(HorizontalPodAutoscalerStatus) out.Status = new(HorizontalPodAutoscalerStatus)
if err := deepCopy_expapi_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil { if err := deepCopy_experimental_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil {
return err return err
} }
} else { } else {
@@ -927,7 +927,7 @@ func deepCopy_expapi_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *Ho
return nil return nil
} }
func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { func deepCopy_experimental_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@@ -937,7 +937,7 @@ func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList,
if in.Items != nil { if in.Items != nil {
out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) out.Items = make([]HorizontalPodAutoscaler, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_expapi_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_experimental_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@@ -947,10 +947,10 @@ func deepCopy_expapi_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList,
return nil return nil
} }
func deepCopy_expapi_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { func deepCopy_experimental_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
if in.ScaleRef != nil { if in.ScaleRef != nil {
out.ScaleRef = new(SubresourceReference) out.ScaleRef = new(SubresourceReference)
if err := deepCopy_expapi_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil { if err := deepCopy_experimental_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil {
return err return err
} }
} else { } else {
@@ -958,18 +958,18 @@ func deepCopy_expapi_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec,
} }
out.MinCount = in.MinCount out.MinCount = in.MinCount
out.MaxCount = in.MaxCount out.MaxCount = in.MaxCount
if err := deepCopy_expapi_ResourceConsumption(in.Target, &out.Target, c); err != nil { if err := deepCopy_experimental_ResourceConsumption(in.Target, &out.Target, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_expapi_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { func deepCopy_experimental_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
out.CurrentReplicas = in.CurrentReplicas out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas out.DesiredReplicas = in.DesiredReplicas
if in.CurrentConsumption != nil { if in.CurrentConsumption != nil {
out.CurrentConsumption = new(ResourceConsumption) out.CurrentConsumption = new(ResourceConsumption)
if err := deepCopy_expapi_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil { if err := deepCopy_experimental_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil {
return err return err
} }
} else { } else {
@@ -986,14 +986,129 @@ func deepCopy_expapi_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerSta
return nil return nil
} }
func deepCopy_expapi_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { func deepCopy_experimental_Job(in Job, out *Job, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_experimental_JobSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_experimental_JobStatus(in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
func deepCopy_experimental_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
if err := deepCopy_util_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil {
return err
}
if err := deepCopy_util_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil {
return err
}
out.Reason = in.Reason
out.Message = in.Message
return nil
}
func deepCopy_experimental_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_api_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
return err
}
if in.Items != nil {
out.Items = make([]Job, len(in.Items))
for i := range in.Items {
if err := deepCopy_experimental_Job(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
func deepCopy_experimental_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
if in.Parallelism != nil {
out.Parallelism = new(int)
*out.Parallelism = *in.Parallelism
} else {
out.Parallelism = nil
}
if in.Completions != nil {
out.Completions = new(int)
*out.Completions = *in.Completions
} else {
out.Completions = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
if in.Template != nil {
out.Template = new(api.PodTemplateSpec)
if err := deepCopy_api_PodTemplateSpec(*in.Template, out.Template, c); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func deepCopy_experimental_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
if in.Conditions != nil {
out.Conditions = make([]JobCondition, len(in.Conditions))
for i := range in.Conditions {
if err := deepCopy_experimental_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil {
return err
}
}
} else {
out.Conditions = nil
}
if in.StartTime != nil {
out.StartTime = new(util.Time)
if err := deepCopy_util_Time(*in.StartTime, out.StartTime, c); err != nil {
return err
}
} else {
out.StartTime = nil
}
if in.CompletionTime != nil {
out.CompletionTime = new(util.Time)
if err := deepCopy_util_Time(*in.CompletionTime, out.CompletionTime, c); err != nil {
return err
}
} else {
out.CompletionTime = nil
}
out.Active = in.Active
out.Successful = in.Successful
out.Unsuccessful = in.Unsuccessful
return nil
}
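
The generated Job deep-copy functions above all follow one defensive pattern: pointer, map, and slice fields are re-allocated rather than aliased, so mutating the original can never leak into the copy. A minimal, self-contained sketch of that pattern, using a hypothetical MiniSpec type rather than any real Kubernetes type:

package main

import "fmt"

// MiniSpec is a hypothetical stand-in that mirrors the kinds of fields the
// generated functions above have to handle: a pointer, a map, and a slice.
type MiniSpec struct {
	Parallelism *int
	Selector    map[string]string
	Args        []string
}

// deepCopyMiniSpec re-allocates every reference field, in the same style as
// the generated deepCopy_experimental_* functions.
func deepCopyMiniSpec(in MiniSpec, out *MiniSpec) {
	if in.Parallelism != nil {
		out.Parallelism = new(int) // fresh allocation, not an aliased pointer
		*out.Parallelism = *in.Parallelism
	} else {
		out.Parallelism = nil
	}
	if in.Selector != nil {
		out.Selector = make(map[string]string, len(in.Selector))
		for k, v := range in.Selector {
			out.Selector[k] = v
		}
	} else {
		out.Selector = nil
	}
	if in.Args != nil {
		out.Args = append([]string(nil), in.Args...)
	} else {
		out.Args = nil
	}
}

func main() {
	n := 2
	orig := MiniSpec{Parallelism: &n, Selector: map[string]string{"app": "demo"}}
	var dup MiniSpec
	deepCopyMiniSpec(orig, &dup)
	*orig.Parallelism = 5
	orig.Selector["app"] = "changed"
	fmt.Println(*dup.Parallelism, dup.Selector["app"]) // prints: 2 demo
}
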
func deepCopy_experimental_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_expapi_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error { func deepCopy_experimental_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error {
out.Resource = in.Resource out.Resource = in.Resource
if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil { if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil {
return err return err
@@ -1001,7 +1116,7 @@ func deepCopy_expapi_ResourceConsumption(in ResourceConsumption, out *ResourceCo
return nil return nil
} }
func deepCopy_expapi_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { func deepCopy_experimental_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error {
if err := deepCopy_util_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, c); err != nil { if err := deepCopy_util_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, c); err != nil {
return err return err
} }
@@ -1012,28 +1127,28 @@ func deepCopy_expapi_RollingUpdateDeployment(in RollingUpdateDeployment, out *Ro
return nil return nil
} }
func deepCopy_expapi_Scale(in Scale, out *Scale, c *conversion.Cloner) error { func deepCopy_experimental_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_expapi_ScaleSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_experimental_ScaleSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_expapi_ScaleStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_experimental_ScaleStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_expapi_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { func deepCopy_experimental_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
out.Replicas = in.Replicas out.Replicas = in.Replicas
return nil return nil
} }
func deepCopy_expapi_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { func deepCopy_experimental_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
out.Replicas = in.Replicas out.Replicas = in.Replicas
if in.Selector != nil { if in.Selector != nil {
out.Selector = make(map[string]string) out.Selector = make(map[string]string)
@@ -1046,7 +1161,7 @@ func deepCopy_expapi_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion
return nil return nil
} }
func deepCopy_expapi_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error { func deepCopy_experimental_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error {
out.Kind = in.Kind out.Kind = in.Kind
out.Namespace = in.Namespace out.Namespace = in.Namespace
out.Name = in.Name out.Name = in.Name
@@ -1055,7 +1170,7 @@ func deepCopy_expapi_SubresourceReference(in SubresourceReference, out *Subresou
return nil return nil
} }
func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { func deepCopy_experimental_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@@ -1066,7 +1181,7 @@ func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyRe
if in.Versions != nil { if in.Versions != nil {
out.Versions = make([]APIVersion, len(in.Versions)) out.Versions = make([]APIVersion, len(in.Versions))
for i := range in.Versions { for i := range in.Versions {
if err := deepCopy_expapi_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil { if err := deepCopy_experimental_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil {
return err return err
} }
} }
@@ -1076,7 +1191,7 @@ func deepCopy_expapi_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyRe
return nil return nil
} }
func deepCopy_expapi_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { func deepCopy_experimental_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@@ -1094,7 +1209,7 @@ func deepCopy_expapi_ThirdPartyResourceData(in ThirdPartyResourceData, out *Thir
return nil return nil
} }
func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { func deepCopy_experimental_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@@ -1104,7 +1219,7 @@ func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, o
if in.Items != nil { if in.Items != nil {
out.Items = make([]ThirdPartyResourceData, len(in.Items)) out.Items = make([]ThirdPartyResourceData, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_expapi_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_experimental_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@@ -1114,7 +1229,7 @@ func deepCopy_expapi_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, o
return nil return nil
} }
func deepCopy_expapi_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { func deepCopy_experimental_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@@ -1124,7 +1239,7 @@ func deepCopy_expapi_ThirdPartyResourceList(in ThirdPartyResourceList, out *Thir
if in.Items != nil { if in.Items != nil {
out.Items = make([]ThirdPartyResource, len(in.Items)) out.Items = make([]ThirdPartyResource, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_expapi_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_experimental_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@@ -1192,31 +1307,36 @@ func init() {
deepCopy_api_VolumeMount, deepCopy_api_VolumeMount,
deepCopy_api_VolumeSource, deepCopy_api_VolumeSource,
deepCopy_resource_Quantity, deepCopy_resource_Quantity,
deepCopy_expapi_APIVersion, deepCopy_experimental_APIVersion,
deepCopy_expapi_DaemonSet, deepCopy_experimental_DaemonSet,
deepCopy_expapi_DaemonSetList, deepCopy_experimental_DaemonSetList,
deepCopy_expapi_DaemonSetSpec, deepCopy_experimental_DaemonSetSpec,
deepCopy_expapi_DaemonSetStatus, deepCopy_experimental_DaemonSetStatus,
deepCopy_expapi_Deployment, deepCopy_experimental_Deployment,
deepCopy_expapi_DeploymentList, deepCopy_experimental_DeploymentList,
deepCopy_expapi_DeploymentSpec, deepCopy_experimental_DeploymentSpec,
deepCopy_expapi_DeploymentStatus, deepCopy_experimental_DeploymentStatus,
deepCopy_expapi_DeploymentStrategy, deepCopy_experimental_DeploymentStrategy,
deepCopy_expapi_HorizontalPodAutoscaler, deepCopy_experimental_HorizontalPodAutoscaler,
deepCopy_expapi_HorizontalPodAutoscalerList, deepCopy_experimental_HorizontalPodAutoscalerList,
deepCopy_expapi_HorizontalPodAutoscalerSpec, deepCopy_experimental_HorizontalPodAutoscalerSpec,
deepCopy_expapi_HorizontalPodAutoscalerStatus, deepCopy_experimental_HorizontalPodAutoscalerStatus,
deepCopy_expapi_ReplicationControllerDummy, deepCopy_experimental_Job,
deepCopy_expapi_ResourceConsumption, deepCopy_experimental_JobCondition,
deepCopy_expapi_RollingUpdateDeployment, deepCopy_experimental_JobList,
deepCopy_expapi_Scale, deepCopy_experimental_JobSpec,
deepCopy_expapi_ScaleSpec, deepCopy_experimental_JobStatus,
deepCopy_expapi_ScaleStatus, deepCopy_experimental_ReplicationControllerDummy,
deepCopy_expapi_SubresourceReference, deepCopy_experimental_ResourceConsumption,
deepCopy_expapi_ThirdPartyResource, deepCopy_experimental_RollingUpdateDeployment,
deepCopy_expapi_ThirdPartyResourceData, deepCopy_experimental_Scale,
deepCopy_expapi_ThirdPartyResourceDataList, deepCopy_experimental_ScaleSpec,
deepCopy_expapi_ThirdPartyResourceList, deepCopy_experimental_ScaleStatus,
deepCopy_experimental_SubresourceReference,
deepCopy_experimental_ThirdPartyResource,
deepCopy_experimental_ThirdPartyResourceData,
deepCopy_experimental_ThirdPartyResourceDataList,
deepCopy_experimental_ThirdPartyResourceList,
deepCopy_util_IntOrString, deepCopy_util_IntOrString,
deepCopy_util_Time, deepCopy_util_Time,
) )
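
The block above is the argument list of a registration call run at package init time; each generated function is handed over once and later found by type. As a rough sketch of that dispatch idea, assuming a hypothetical registry rather than the real conversion.Cloner API:

package main

import (
	"fmt"
	"reflect"
)

// copyFunc is a hypothetical signature for a registered deep-copy function.
type copyFunc func(in interface{}) interface{}

var registry = map[reflect.Type]copyFunc{}

func register(t reflect.Type, f copyFunc) { registry[t] = f }

type Widget struct{ Name string }

func init() {
	// Registration happens once, at init time, as in the file above.
	register(reflect.TypeOf(Widget{}), func(in interface{}) interface{} {
		w := in.(Widget)
		return Widget{Name: w.Name} // a value copy suffices for this type
	})
}

// deepCopy dispatches on the concrete type of the value being cloned.
func deepCopy(in interface{}) (interface{}, error) {
	f, ok := registry[reflect.TypeOf(in)]
	if !ok {
		return nil, fmt.Errorf("no deep-copy function registered for %T", in)
	}
	return f(in), nil
}

func main() {
	out, err := deepCopy(Widget{Name: "a"})
	fmt.Println(out, err) // prints: {a} <nil>
}
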


@@ -23,8 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/api/registered"
_ "k8s.io/kubernetes/pkg/expapi" _ "k8s.io/kubernetes/pkg/apis/experimental"
"k8s.io/kubernetes/pkg/expapi/v1" "k8s.io/kubernetes/pkg/apis/experimental/v1"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
) )
@@ -39,7 +39,7 @@ var (
RESTMapper meta.RESTMapper RESTMapper meta.RESTMapper
) )
const importPrefix = "k8s.io/kubernetes/pkg/expapi" const importPrefix = "k8s.io/kubernetes/pkg/apis/experimental"
func init() { func init() {
Version = registered.RegisteredVersions[0] Version = registered.RegisteredVersions[0]


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package expapi package experimental
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
@@ -32,6 +32,8 @@ func addKnownTypes() {
&DeploymentList{}, &DeploymentList{},
&HorizontalPodAutoscaler{}, &HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{}, &HorizontalPodAutoscalerList{},
&Job{},
&JobList{},
&ReplicationControllerDummy{}, &ReplicationControllerDummy{},
&Scale{}, &Scale{},
&ThirdPartyResource{}, &ThirdPartyResource{},
@@ -47,6 +49,8 @@ func (*Deployment) IsAnAPIObject() {}
func (*DeploymentList) IsAnAPIObject() {} func (*DeploymentList) IsAnAPIObject() {}
func (*HorizontalPodAutoscaler) IsAnAPIObject() {} func (*HorizontalPodAutoscaler) IsAnAPIObject() {}
func (*HorizontalPodAutoscalerList) IsAnAPIObject() {} func (*HorizontalPodAutoscalerList) IsAnAPIObject() {}
func (*Job) IsAnAPIObject() {}
func (*JobList) IsAnAPIObject() {}
func (*ReplicationControllerDummy) IsAnAPIObject() {} func (*ReplicationControllerDummy) IsAnAPIObject() {}
func (*Scale) IsAnAPIObject() {} func (*Scale) IsAnAPIObject() {}
func (*ThirdPartyResource) IsAnAPIObject() {} func (*ThirdPartyResource) IsAnAPIObject() {}


@@ -19,7 +19,7 @@ package testapi
import ( import (
"strings" "strings"
"k8s.io/kubernetes/pkg/expapi/latest" "k8s.io/kubernetes/pkg/apis/experimental/latest"
) )
// Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. // Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name.


@@ -15,7 +15,7 @@ limitations under the License.
*/ */
/* /*
This file (together with pkg/expapi/v1/types.go) contains the experimental This file (together with pkg/apis/experimental/v1/types.go) contains the experimental
types in kubernetes. These API objects are experimental, meaning that the types in kubernetes. These API objects are experimental, meaning that the
APIs may be broken at any time by the kubernetes team. APIs may be broken at any time by the kubernetes team.
@@ -26,7 +26,7 @@ beyond registration differences. In other words, experimental API group
support is experimental. support is experimental.
*/ */
package expapi package experimental
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
@@ -362,3 +362,102 @@ type ThirdPartyResourceDataList struct {
// Items is a list of third party objects // Items is a list of third party objects
Items []ThirdPartyResourceData `json:"items"` Items []ThirdPartyResourceData `json:"items"`
} }
// Job represents the configuration of a single job.
type Job struct {
api.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
api.ObjectMeta `json:"metadata,omitempty"`
// Spec is a structure defining the expected behavior of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Spec JobSpec `json:"spec,omitempty"`
// Status is a structure describing current status of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Status JobStatus `json:"status,omitempty"`
}
// JobList is a collection of jobs.
type JobList struct {
api.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
api.ListMeta `json:"metadata,omitempty"`
// Items is the list of Jobs.
Items []Job `json:"items"`
}
// JobSpec describes what the job execution will look like.
type JobSpec struct {
// Parallelism specifies the maximum desired number of pods the job should
// run at any given time. The actual number of pods running in steady state will
// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
// i.e. when the work left to do is less than max parallelism.
Parallelism *int `json:"parallelism,omitempty"`
// Completions specifies the desired number of successfully finished pods the
// job should be run with. Defaults to 1.
Completions *int `json:"completions,omitempty"`
// Selector is a label query over pods that should match the pod count.
Selector map[string]string `json:"selector"`
// Template is the object that describes the pod that will be created when
// executing a job.
Template *api.PodTemplateSpec `json:"template"`
}
// JobStatus represents the current state of a Job.
type JobStatus struct {
// Conditions represent the latest available observations of an object's current state.
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
// StartTime represents the time when the job was acknowledged by the Job Manager.
// It is not guaranteed to be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
StartTime *util.Time `json:"startTime,omitempty"`
// CompletionTime represents the time when the job was completed. It is not guaranteed to
// be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
CompletionTime *util.Time `json:"completionTime,omitempty"`
// Active is the number of actively running pods.
Active int `json:"active,omitempty"`
// Successful is the number of pods which reached Phase Succeeded.
Successful int `json:"successful,omitempty"`
// Unsuccessful is the number of pod failures. This applies only to jobs
// created with RestartPolicyNever; otherwise this value will always be 0.
Unsuccessful int `json:"unsuccessful,omitempty"`
}
type JobConditionType string
// These are valid conditions of a job.
const (
// JobComplete means the job has completed its execution.
JobComplete JobConditionType = "Complete"
)
// JobCondition describes the current state of a job.
type JobCondition struct {
// Type of job condition, currently only Complete.
Type JobConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition was checked.
LastProbeTime util.Time `json:"lastProbeTime,omitempty"`
// Last time the condition transitioned from one status to another.
LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"`
// (brief) reason for the condition's last transition.
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
Message string `json:"message,omitempty"`
}
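
The Parallelism comment above encodes a small piece of arithmetic: in steady state a job runs min(parallelism, completions - successful) pods. A worked example, using a local struct that merely mirrors the relevant fields (it is not the experimental.Job type itself):

package main

import "fmt"

// jobState is a local stand-in mirroring only the fields involved in the
// steady-state calculation described in the JobSpec comment above.
type jobState struct {
	Parallelism int
	Completions int
	Successful  int
}

// podsToRun returns min(parallelism, completions - successful).
func podsToRun(j jobState) int {
	remaining := j.Completions - j.Successful
	if remaining < j.Parallelism {
		return remaining
	}
	return j.Parallelism
}

func main() {
	j := jobState{Parallelism: 3, Completions: 5, Successful: 4}
	// Only one completion remains, so steady state runs 1 pod, not 3.
	fmt.Println(podsToRun(j)) // prints: 1
}
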


@@ -21,8 +21,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
v1 "k8s.io/kubernetes/pkg/api/v1" v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/experimental"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/expapi"
"k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util"
) )
@@ -31,12 +31,12 @@ func addConversionFuncs() {
err := api.Scheme.AddConversionFuncs( err := api.Scheme.AddConversionFuncs(
convert_api_PodSpec_To_v1_PodSpec, convert_api_PodSpec_To_v1_PodSpec,
convert_v1_PodSpec_To_api_PodSpec, convert_v1_PodSpec_To_api_PodSpec,
convert_expapi_DeploymentSpec_To_v1_DeploymentSpec, convert_experimental_DeploymentSpec_To_v1_DeploymentSpec,
convert_v1_DeploymentSpec_To_expapi_DeploymentSpec, convert_v1_DeploymentSpec_To_experimental_DeploymentSpec,
convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy, convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy,
convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy, convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy,
convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment,
convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment, convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment,
) )
if err != nil { if err != nil {
// If one of the conversion functions is malformed, detect it immediately. // If one of the conversion functions is malformed, detect it immediately.
@@ -178,9 +178,9 @@ func convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve
return nil return nil
} }
func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { func convert_experimental_DeploymentSpec_To_v1_DeploymentSpec(in *experimental.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*expapi.DeploymentSpec))(in) defaulting.(func(*experimental.DeploymentSpec))(in)
} }
out.Replicas = new(int) out.Replicas = new(int)
*out.Replicas = in.Replicas *out.Replicas = in.Replicas
@@ -200,7 +200,7 @@ func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpe
} else { } else {
out.Template = nil out.Template = nil
} }
if err := convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { if err := convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err return err
} }
out.UniqueLabelKey = new(string) out.UniqueLabelKey = new(string)
@@ -208,7 +208,7 @@ func convert_expapi_DeploymentSpec_To_v1_DeploymentSpec(in *expapi.DeploymentSpe
return nil return nil
} }
func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out *expapi.DeploymentSpec, s conversion.Scope) error { func convert_v1_DeploymentSpec_To_experimental_DeploymentSpec(in *DeploymentSpec, out *experimental.DeploymentSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*DeploymentSpec))(in) defaulting.(func(*DeploymentSpec))(in)
} }
@@ -231,7 +231,7 @@ func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out
} else { } else {
out.Template = nil out.Template = nil
} }
if err := convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { if err := convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err return err
} }
if in.UniqueLabelKey != nil { if in.UniqueLabelKey != nil {
@@ -240,14 +240,14 @@ func convert_v1_DeploymentSpec_To_expapi_DeploymentSpec(in *DeploymentSpec, out
return nil return nil
} }
func convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(in *expapi.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { func convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(in *experimental.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*expapi.DeploymentStrategy))(in) defaulting.(func(*experimental.DeploymentStrategy))(in)
} }
out.Type = DeploymentType(in.Type) out.Type = DeploymentType(in.Type)
if in.RollingUpdate != nil { if in.RollingUpdate != nil {
out.RollingUpdate = new(RollingUpdateDeployment) out.RollingUpdate = new(RollingUpdateDeployment)
if err := convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { if err := convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err return err
} }
} else { } else {
@@ -256,14 +256,14 @@ func convert_expapi_DeploymentStrategy_To_v1_DeploymentStrategy(in *expapi.Deplo
return nil return nil
} }
func convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(in *DeploymentStrategy, out *expapi.DeploymentStrategy, s conversion.Scope) error { func convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(in *DeploymentStrategy, out *experimental.DeploymentStrategy, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*DeploymentStrategy))(in) defaulting.(func(*DeploymentStrategy))(in)
} }
out.Type = expapi.DeploymentType(in.Type) out.Type = experimental.DeploymentType(in.Type)
if in.RollingUpdate != nil { if in.RollingUpdate != nil {
out.RollingUpdate = new(expapi.RollingUpdateDeployment) out.RollingUpdate = new(experimental.RollingUpdateDeployment)
if err := convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { if err := convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err return err
} }
} else { } else {
@@ -272,9 +272,9 @@ func convert_v1_DeploymentStrategy_To_expapi_DeploymentStrategy(in *DeploymentSt
return nil return nil
} }
func convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *expapi.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { func convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *experimental.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*expapi.RollingUpdateDeployment))(in) defaulting.(func(*experimental.RollingUpdateDeployment))(in)
} }
if out.MaxUnavailable == nil { if out.MaxUnavailable == nil {
out.MaxUnavailable = &util.IntOrString{} out.MaxUnavailable = &util.IntOrString{}
@@ -292,7 +292,7 @@ func convert_expapi_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *ex
return nil return nil
} }
func convert_v1_RollingUpdateDeployment_To_expapi_RollingUpdateDeployment(in *RollingUpdateDeployment, out *expapi.RollingUpdateDeployment, s conversion.Scope) error { func convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in *RollingUpdateDeployment, out *experimental.RollingUpdateDeployment, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*RollingUpdateDeployment))(in) defaulting.(func(*RollingUpdateDeployment))(in)
} }
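
The conversion functions in this hunk share one shape: internal types hold plain values, versioned types hold pointers so an unset field can be told apart from a zero value, and each direction gets its own function, optionally preceded by a defaulting hook. A stripped-down sketch under those assumptions; internalSpec and v1Spec are illustrative stand-ins, not the real DeploymentSpec:

package main

import "fmt"

// internalSpec uses plain values; v1Spec uses pointers so that "unset"
// is distinguishable from zero. Both are hypothetical types.
type internalSpec struct{ Replicas int }
type v1Spec struct{ Replicas *int }

// Internal -> versioned: always populate the pointer.
func convertInternalToV1(in *internalSpec, out *v1Spec) {
	out.Replicas = new(int)
	*out.Replicas = in.Replicas
}

// Versioned -> internal: a nil pointer falls back to a default,
// standing in for the defaulting step the real functions hook into.
func convertV1ToInternal(in *v1Spec, out *internalSpec) {
	if in.Replicas != nil {
		out.Replicas = *in.Replicas
	} else {
		out.Replicas = 1 // assumed default for the sketch
	}
}

func main() {
	var v v1Spec
	convertInternalToV1(&internalSpec{Replicas: 3}, &v)
	var i internalSpec
	convertV1ToInternal(&v1Spec{}, &i)
	fmt.Println(*v.Replicas, i.Replicas) // prints: 3 1
}
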


@@ -998,6 +998,121 @@ func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus,
return nil return nil
} }
func deepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error {
if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err
}
if err := deepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil {
return err
}
if err := deepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
func deepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
if err := deepCopy_util_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil {
return err
}
if err := deepCopy_util_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil {
return err
}
out.Reason = in.Reason
out.Message = in.Message
return nil
}
func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := deepCopy_v1_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
return err
}
if in.Items != nil {
out.Items = make([]Job, len(in.Items))
for i := range in.Items {
if err := deepCopy_v1_Job(in.Items[i], &out.Items[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
func deepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
if in.Parallelism != nil {
out.Parallelism = new(int)
*out.Parallelism = *in.Parallelism
} else {
out.Parallelism = nil
}
if in.Completions != nil {
out.Completions = new(int)
*out.Completions = *in.Completions
} else {
out.Completions = nil
}
if in.Selector != nil {
out.Selector = make(map[string]string)
for key, val := range in.Selector {
out.Selector[key] = val
}
} else {
out.Selector = nil
}
if in.Template != nil {
out.Template = new(v1.PodTemplateSpec)
if err := deepCopy_v1_PodTemplateSpec(*in.Template, out.Template, c); err != nil {
return err
}
} else {
out.Template = nil
}
return nil
}
func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
if in.Conditions != nil {
out.Conditions = make([]JobCondition, len(in.Conditions))
for i := range in.Conditions {
if err := deepCopy_v1_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil {
return err
}
}
} else {
out.Conditions = nil
}
if in.StartTime != nil {
out.StartTime = new(util.Time)
if err := deepCopy_util_Time(*in.StartTime, out.StartTime, c); err != nil {
return err
}
} else {
out.StartTime = nil
}
if in.CompletionTime != nil {
out.CompletionTime = new(util.Time)
if err := deepCopy_util_Time(*in.CompletionTime, out.CompletionTime, c); err != nil {
return err
}
} else {
out.CompletionTime = nil
}
out.Active = in.Active
out.Successful = in.Successful
out.Unsuccessful = in.Unsuccessful
return nil
}
func deepCopy_v1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { func deepCopy_v1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
@@ -1228,6 +1343,11 @@ func init() {
deepCopy_v1_HorizontalPodAutoscalerList, deepCopy_v1_HorizontalPodAutoscalerList,
deepCopy_v1_HorizontalPodAutoscalerSpec, deepCopy_v1_HorizontalPodAutoscalerSpec,
deepCopy_v1_HorizontalPodAutoscalerStatus, deepCopy_v1_HorizontalPodAutoscalerStatus,
deepCopy_v1_Job,
deepCopy_v1_JobCondition,
deepCopy_v1_JobList,
deepCopy_v1_JobSpec,
deepCopy_v1_JobStatus,
deepCopy_v1_ReplicationControllerDummy, deepCopy_v1_ReplicationControllerDummy,
deepCopy_v1_ResourceConsumption, deepCopy_v1_ResourceConsumption,
deepCopy_v1_RollingUpdateDeployment, deepCopy_v1_RollingUpdateDeployment,


@@ -36,6 +36,8 @@ func addKnownTypes() {
&DeploymentList{}, &DeploymentList{},
&HorizontalPodAutoscaler{}, &HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{}, &HorizontalPodAutoscalerList{},
&Job{},
&JobList{},
&ReplicationControllerDummy{}, &ReplicationControllerDummy{},
&Scale{}, &Scale{},
&ThirdPartyResource{}, &ThirdPartyResource{},
@@ -51,6 +53,8 @@ func (*Deployment) IsAnAPIObject() {}
func (*DeploymentList) IsAnAPIObject() {} func (*DeploymentList) IsAnAPIObject() {}
func (*HorizontalPodAutoscaler) IsAnAPIObject() {} func (*HorizontalPodAutoscaler) IsAnAPIObject() {}
func (*HorizontalPodAutoscalerList) IsAnAPIObject() {} func (*HorizontalPodAutoscalerList) IsAnAPIObject() {}
func (*Job) IsAnAPIObject() {}
func (*JobList) IsAnAPIObject() {}
func (*ReplicationControllerDummy) IsAnAPIObject() {} func (*ReplicationControllerDummy) IsAnAPIObject() {}
func (*Scale) IsAnAPIObject() {} func (*Scale) IsAnAPIObject() {}
func (*ThirdPartyResource) IsAnAPIObject() {} func (*ThirdPartyResource) IsAnAPIObject() {}


@@ -363,3 +363,102 @@ type ThirdPartyResourceDataList struct {
// Items is the list of ThirdPartyResourceData. // Items is the list of ThirdPartyResourceData.
Items []ThirdPartyResourceData `json:"items"` Items []ThirdPartyResourceData `json:"items"`
} }
// Job represents the configuration of a single job.
type Job struct {
v1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
v1.ObjectMeta `json:"metadata,omitempty"`
// Spec is a structure defining the expected behavior of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Spec JobSpec `json:"spec,omitempty"`
// Status is a structure describing current status of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
Status JobStatus `json:"status,omitempty"`
}
// JobList is a collection of jobs.
type JobList struct {
v1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
v1.ListMeta `json:"metadata,omitempty"`
// Items is the list of Jobs.
Items []Job `json:"items"`
}
// JobSpec describes what the job execution will look like.
type JobSpec struct {
// Parallelism specifies the maximum desired number of pods the job should
// run at any given time. The actual number of pods running in steady state will
// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
// i.e. when the work left to do is less than max parallelism.
Parallelism *int `json:"parallelism,omitempty"`
// Completions specifies the desired number of successfully finished pods the
// job should be run with. Defaults to 1.
Completions *int `json:"completions,omitempty"`
// Selector is a label query over pods that should match the pod count.
Selector map[string]string `json:"selector"`
// Template is the object that describes the pod that will be created when
// executing a job.
Template *v1.PodTemplateSpec `json:"template"`
}
// JobStatus represents the current state of a Job.
type JobStatus struct {
// Conditions represent the latest available observations of an object's current state.
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
// StartTime represents the time when the job was acknowledged by the Job Manager.
// It is not guaranteed to be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
StartTime *util.Time `json:"startTime,omitempty"`
// CompletionTime represents the time when the job was completed. It is not guaranteed to
// be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
CompletionTime *util.Time `json:"completionTime,omitempty"`
// Active is the number of actively running pods.
Active int `json:"active,omitempty"`
// Successful is the number of pods which reached Phase Succeeded.
Successful int `json:"successful,omitempty"`
// Unsuccessful is the number of pod failures. This applies only to jobs
// created with RestartPolicyNever; otherwise this value will always be 0.
Unsuccessful int `json:"unsuccessful,omitempty"`
}
type JobConditionType string
// These are valid conditions of a job.
const (
// JobComplete means the job has completed its execution.
JobComplete JobConditionType = "Complete"
)
// JobCondition describes the current state of a job.
type JobCondition struct {
// Type of job condition, currently only Complete.
Type JobConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status v1.ConditionStatus `json:"status"`
// Last time the condition was checked.
LastProbeTime util.Time `json:"lastProbeTime,omitempty"`
// Last time the condition transitioned from one status to another.
LastTransitionTime util.Time `json:"lastTransitionTime,omitempty"`
// (brief) reason for the condition's last transition.
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
Message string `json:"message,omitempty"`
}
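
Given the condition types defined above, a client would typically treat a job as finished once JobStatus.Conditions carries a Complete condition whose status is True. A sketch with local stand-in types, not imports of the real package:

package main

import "fmt"

// Local stand-ins mirroring JobConditionType, ConditionStatus, and the
// fields of JobCondition used for the check; they are not the real types.
type conditionStatus string
type jobConditionType string

const (
	conditionTrue conditionStatus  = "True"
	jobComplete   jobConditionType = "Complete"
)

type jobCondition struct {
	Type   jobConditionType
	Status conditionStatus
}

// isJobComplete scans the conditions for Complete=True.
func isJobComplete(conds []jobCondition) bool {
	for _, c := range conds {
		if c.Type == jobComplete && c.Status == conditionTrue {
			return true
		}
	}
	return false
}

func main() {
	conds := []jobCondition{{Type: jobComplete, Status: conditionTrue}}
	fmt.Println(isJobComplete(conds)) // prints: true
}
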

Some files were not shown because too many files have changed in this diff.