Merge branch 'master' into upgrade_aliases_branch
api/openapi-spec/swagger.json (generated, 19 changes)

@@ -79217,6 +79217,18 @@
     }
    }
   },
+  "io.k8s.api.extensions.v1beta1.AllowedFlexVolume": {
+   "description": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.",
+   "required": [
+    "driver"
+   ],
+   "properties": {
+    "driver": {
+     "description": "Driver is the name of the Flexvolume driver.",
+     "type": "string"
+    }
+   }
+  },
  "io.k8s.api.extensions.v1beta1.AllowedHostPath": {
   "description": "defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.",
   "properties": {
@@ -80171,6 +80183,13 @@
     "type": "string"
    }
   },
+  "allowedFlexVolumes": {
+   "description": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.",
+   "type": "array",
+   "items": {
+    "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.AllowedFlexVolume"
+   }
+  },
  "allowedHostPaths": {
   "description": "is a white list of allowed host paths. Empty indicates that all host paths may be used.",
   "type": "array",

api/swagger-spec/extensions_v1beta1.json (generated, 20 changes)
@@ -10314,6 +10314,13 @@
    "$ref": "v1beta1.AllowedHostPath"
   },
   "description": "is a white list of allowed host paths. Empty indicates that all host paths may be used."
+  },
+  "allowedFlexVolumes": {
+   "type": "array",
+   "items": {
+    "$ref": "v1beta1.AllowedFlexVolume"
+   },
+   "description": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field."
  }
 }
},

@@ -10442,6 +10449,19 @@
  }
 }
},
+"v1beta1.AllowedFlexVolume": {
+ "id": "v1beta1.AllowedFlexVolume",
+ "description": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.",
+ "required": [
+  "driver"
+ ],
+ "properties": {
+  "driver": {
+   "type": "string",
+   "description": "Driver is the name of the Flexvolume driver."
+  }
+ }
+},
"v1beta1.ReplicaSetList": {
 "id": "v1beta1.ReplicaSetList",
 "description": "ReplicaSetList is a collection of ReplicaSets.",
@@ -19,8 +19,8 @@ FROM BASEIMAGE
 CROSS_BUILD_COPY qemu-ARCH-static /usr/bin/

 RUN clean-install \
-    iptables \
-    ebtables \
     conntrack \
-    module-init-tools \
-    ipset
+    ebtables \
+    ipset \
+    iptables \
+    kmod
@@ -98,18 +98,23 @@ function kube::release::package_tarballs() {

 # Package the source code we built, for compliance/licensing/audit/yadda.
 function kube::release::package_src_tarball() {
+  local -r src_tarball="${RELEASE_TARS}/kubernetes-src.tar.gz"
   kube::log::status "Building tarball: src"
-  local source_files=(
-    $(cd "${KUBE_ROOT}" && find . -mindepth 1 -maxdepth 1 \
-      -not \( \
-        \( -path ./_\* -o \
-           -path ./.git\* -o \
-           -path ./.config\* -o \
-           -path ./.gsutil\* \
-        \) -prune \
-      \))
-  )
-  "${TAR}" czf "${RELEASE_TARS}/kubernetes-src.tar.gz" -C "${KUBE_ROOT}" "${source_files[@]}"
+  if [[ "${KUBE_GIT_TREE_STATE-}" == "clean" ]]; then
+    git archive -o "${src_tarball}" HEAD
+  else
+    local source_files=(
+      $(cd "${KUBE_ROOT}" && find . -mindepth 1 -maxdepth 1 \
+        -not \( \
+          \( -path ./_\* -o \
+             -path ./.git\* -o \
+             -path ./.config\* -o \
+             -path ./.gsutil\* \
+          \) -prune \
+        \))
+    )
+    "${TAR}" czf "${src_tarball}" -C "${KUBE_ROOT}" "${source_files[@]}"
+  fi
 }

 # Package up all of the cross compiled clients. Over time this should grow into
cluster/addons/metadata-agent/OWNERS (new file, 6 lines)

@@ -0,0 +1,6 @@
+approvers:
+- kawych
+- piosz
+reviewers:
+- kawych
+- piosz

cluster/addons/metadata-agent/README.md (new file, 4 lines)

@@ -0,0 +1,4 @@
+# Kubernetes Metadata Agent
+
+Metadata Agent is a source of metadata required by logging and monitoring agents
+running on a cluster.
@@ -0,0 +1,38 @@
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  labels:
+    app: stackdriver-agents
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+  name: stackdriver-agents
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: stackdriver-agents
+  template:
+    metadata:
+      labels:
+        app: stackdriver-agents
+    spec:
+      containers:
+      - image: us.gcr.io/container-monitoring-storage/stackdriver-metadata-agent:{{ metadata_agent_version }}
+        imagePullPolicy: IfNotPresent
+        name: metadata-agent
+        ports:
+        - containerPort: 8000
+          hostPort: 8000
+          protocol: TCP
+        resources:
+          requests:
+            cpu: {{ metadata_agent_cpu_request }}
+            memory: {{ metadata_agent_memory_request }}
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      terminationGracePeriodSeconds: 30
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 1
+    type: RollingUpdate
@@ -120,7 +120,7 @@ export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"}

 # Admission Controllers to invoke prior to persisting objects in cluster
 # If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
-export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,ResourceQuota"}
+export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,PVCProtection,ResourceQuota"}

 # Extra options to set on the Docker command line.
 # This is useful for setting --insecure-registry for local registries.
@@ -610,6 +610,8 @@ KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
 ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
 ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
 ENABLE_METRICS_SERVER: $(yaml-quote ${ENABLE_METRICS_SERVER:-false})
+ENABLE_METADATA_AGENT: $(yaml-quote ${ENABLE_METADATA_AGENT:-none})
+METADATA_AGENT_VERSION: $(yaml-quote ${METADATA_AGENT_VERSION:-})
 DOCKER_REGISTRY_MIRROR_URL: $(yaml-quote ${DOCKER_REGISTRY_MIRROR_URL:-})
 ENABLE_L7_LOADBALANCING: $(yaml-quote ${ENABLE_L7_LOADBALANCING:-none})
 ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
@@ -150,6 +150,16 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
 # TODO(piosz) remove this option once Metrics Server became a stable thing.
 ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"

+# Optional: Metadata agent to setup as part of the cluster bring up:
+#   none        - No metadata agent
+#   stackdriver - Stackdriver metadata agent
+# Metadata agent is a daemon set that provides metadata of kubernetes objects
+# running on the same node for exporting metrics and logs.
+ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
+
+# Version tag of metadata agent
+METADATA_AGENT_VERSION="${KUBE_METADATA_AGENT_VERSION:-0.2-0.0.13-5}"
+
 # One special node out of NUM_NODES would be created of this type if specified.
 # Useful for scheduling heapster in large clusters with nodes of small size.
 HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
@@ -289,7 +299,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
 fi

 # Admission Controllers to invoke prior to persisting objects in cluster
-ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority
+ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,PVCProtection

 if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
   ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
@@ -1297,6 +1297,20 @@ EOF
     sed -i -e "s@{{ use_new_resources }}@${use_new_resources}@g" "${controller_yaml}"
   fi
 fi
+if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] ||
+   ([[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
+   [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]); then
+  if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]] &&
+     [[ "${METADATA_AGENT_VERSION:-}" != "" ]]; then
+    metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
+    metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
+    setup-addon-manifests "addons" "metadata-agent/stackdriver"
+    deployment_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
+    sed -i -e "s@{{ metadata_agent_version }}@${METADATA_AGENT_VERSION}@g" "${deployment_yaml}"
+    sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${deployment_yaml}"
+    sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${deployment_yaml}"
+  fi
+fi
 if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
   setup-addon-manifests "addons" "metrics-server"
 fi
@@ -2076,6 +2076,20 @@ EOF
     sed -i -e "s@{{ use_new_resources }}@${use_new_resources}@g" "${controller_yaml}"
   fi
 fi
+if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] ||
+   ([[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
+   [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]); then
+  if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]] &&
+     [[ "${METADATA_AGENT_VERSION:-}" != "" ]]; then
+    metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
+    metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
+    setup-addon-manifests "addons" "metadata-agent/stackdriver"
+    deployment_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
+    sed -i -e "s@{{ metadata_agent_version }}@${METADATA_AGENT_VERSION}@g" "${deployment_yaml}"
+    sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${deployment_yaml}"
+    sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${deployment_yaml}"
+  fi
+fi
 if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
   setup-addon-manifests "addons" "metrics-server"
 fi
@@ -1526,14 +1526,20 @@ function check-cluster() {
   fi

   local start_time=$(date +%s)
+  local curl_out=$(mktemp)
+  kube::util::trap_add "rm -f ${curl_out}" EXIT
   until curl --cacert "${CERT_DIR}/pki/ca.crt" \
     -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
     ${secure} \
-    --max-time 5 --fail --output /dev/null --silent \
-    "https://${KUBE_MASTER_IP}/api/v1/pods"; do
+    --max-time 5 --fail \
+    "https://${KUBE_MASTER_IP}/api/v1/pods" > "${curl_out}" 2>&1; do
     local elapsed=$(($(date +%s) - ${start_time}))
     if [[ ${elapsed} -gt ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} ]]; then
       echo -e "${color_red}Cluster failed to initialize within ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds.${color_norm}" >&2
+      echo "Last output from querying API server follows:" >&2
+      echo "-----------------------------------------------------" >&2
+      cat "${curl_out}" >&2
+      echo "-----------------------------------------------------" >&2
       exit 2
     fi
     printf "."
@@ -27,7 +27,7 @@ source "$KUBE_ROOT/cluster/common.sh"

 export LIBVIRT_DEFAULT_URI=qemu:///system
 export SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true}
-export ADMISSION_CONTROL=${ADMISSION_CONTROL:-Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,ResourceQuota}
+export ADMISSION_CONTROL=${ADMISSION_CONTROL:-Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,PVCProtection,ResourceQuota}
 readonly POOL=kubernetes
 readonly POOL_PATH=/var/lib/libvirt/images/kubernetes

@@ -56,7 +56,7 @@ MASTER_PASSWD="${MASTER_PASSWD:-vagrant}"

 # Admission Controllers to invoke prior to persisting objects in cluster
 # If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
-ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota
+ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,PVCProtection,ResourceQuota

 # Optional: Enable node logging.
 ENABLE_NODE_LOGGING=false

@@ -120,4 +120,3 @@ E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}

 # Default fallback NETWORK_IF_NAME, will be used in case when no 'VAGRANT-BEGIN' comments were defined in network-script
 export DEFAULT_NETWORK_IF_NAME="eth0"
-
@@ -40,6 +40,7 @@ go_library(
     "//plugin/pkg/admission/noderestriction:go_default_library",
     "//plugin/pkg/admission/persistentvolume/label:go_default_library",
     "//plugin/pkg/admission/persistentvolume/resize:go_default_library",
+    "//plugin/pkg/admission/persistentvolumeclaim/pvcprotection:go_default_library",
     "//plugin/pkg/admission/podnodeselector:go_default_library",
     "//plugin/pkg/admission/podpreset:go_default_library",
     "//plugin/pkg/admission/podtolerationrestriction:go_default_library",

@@ -42,6 +42,7 @@ import (
   "k8s.io/kubernetes/plugin/pkg/admission/noderestriction"
   "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label"
   "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize"
+  "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection"
   "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector"
   "k8s.io/kubernetes/plugin/pkg/admission/podpreset"
   "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"

@@ -81,4 +82,5 @@ func RegisterAllAdmissionPlugins(plugins *admission.Plugins) {
   serviceaccount.Register(plugins)
   setdefault.Register(plugins)
   resize.Register(plugins)
+  pvcprotection.Register(plugins)
 }
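Aside: the hunk above only adds pvcprotection.Register(plugins) to the shared admission plugin list. The following is a simplified, self-contained sketch of that registration pattern, not the real admission.Plugins API (whose factories take a config reader and return an admission.Interface); the types here are invented for illustration.

package main

import "fmt"

// factory builds one admission plugin instance; the real factory type also
// receives plugin configuration.
type factory func() (interface{}, error)

// plugins is a simplified stand-in for the shared admission plugin registry
// that RegisterAllAdmissionPlugins populates.
type plugins struct {
	registry map[string]factory
}

// register records a named plugin factory, mirroring how each plugin package
// (for example pvcprotection) exposes a Register(plugins) helper.
func (p *plugins) register(name string, f factory) {
	p.registry[name] = f
}

func main() {
	p := &plugins{registry: map[string]factory{}}
	// The name must match what operators list in --admission-control,
	// e.g. the "PVCProtection" entries added elsewhere in this diff.
	p.register("PVCProtection", func() (interface{}, error) { return struct{}{}, nil })
	fmt.Println("registered admission plugins:", len(p.registry))
}

This is why the ADMISSION_CONTROL lists in the cluster configs also gain a PVCProtection entry: the plugin only runs if its registered name appears in that list.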
@@ -43,12 +43,6 @@ go_library(
     "//pkg/apis/storage/install:go_default_library",
     "//pkg/cloudprovider:go_default_library",
     "//pkg/cloudprovider/providers:go_default_library",
-    "//pkg/cloudprovider/providers/aws:go_default_library",
-    "//pkg/cloudprovider/providers/azure:go_default_library",
-    "//pkg/cloudprovider/providers/gce:go_default_library",
-    "//pkg/cloudprovider/providers/openstack:go_default_library",
-    "//pkg/cloudprovider/providers/photon:go_default_library",
-    "//pkg/cloudprovider/providers/vsphere:go_default_library",
     "//pkg/controller:go_default_library",
     "//pkg/controller/bootstrap:go_default_library",
     "//pkg/controller/certificates/approver:go_default_library",

@@ -79,6 +73,7 @@ go_library(
     "//pkg/controller/volume/attachdetach:go_default_library",
     "//pkg/controller/volume/expand:go_default_library",
     "//pkg/controller/volume/persistentvolume:go_default_library",
+    "//pkg/controller/volume/pvcprotection:go_default_library",
     "//pkg/features:go_default_library",
     "//pkg/quota/generic:go_default_library",
     "//pkg/quota/install:go_default_library",

@@ -359,6 +359,7 @@ func NewControllerInitializers() map[string]InitFunc {
   controllers["attachdetach"] = startAttachDetachController
   controllers["persistentvolume-expander"] = startVolumeExpandController
   controllers["clusterrole-aggregation"] = startClusterRoleAggregrationController
+  controllers["pvc-protection"] = startPVCProtectionController

   return controllers
 }
@@ -53,6 +53,7 @@ import (
   "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
   "k8s.io/kubernetes/pkg/controller/volume/expand"
   persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
+  "k8s.io/kubernetes/pkg/controller/volume/pvcprotection"
   "k8s.io/kubernetes/pkg/features"
   "k8s.io/kubernetes/pkg/quota/generic"
   quotainstall "k8s.io/kubernetes/pkg/quota/install"

@@ -376,3 +377,15 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) {

   return true, nil
 }
+
+func startPVCProtectionController(ctx ControllerContext) (bool, error) {
+  if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) {
+    go pvcprotection.NewPVCProtectionController(
+      ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
+      ctx.InformerFactory.Core().V1().Pods(),
+      ctx.ClientBuilder.ClientOrDie("pvc-protection-controller"),
+    ).Run(1, ctx.Stop)
+    return true, nil
+  }
+  return false, nil
+}
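Aside: startPVCProtectionController above follows the controller-manager convention that every controller is a named InitFunc reporting whether it actually started, and a feature-gated controller returns (false, nil) when its gate is off. A minimal, self-contained sketch of that convention with simplified types (not the real ControllerContext or feature-gate API):

package main

import "fmt"

// initFunc mirrors the controller-manager pattern: each controller is a
// function that reports whether it started and any startup error.
type initFunc func(featureEnabled bool) (started bool, err error)

// startPVCProtectionControllerSketch is a simplified stand-in for the real
// startPVCProtectionController: it only "runs" when the PVCProtection feature
// gate is enabled and otherwise reports not-started without an error.
func startPVCProtectionControllerSketch(featureEnabled bool) (bool, error) {
	if !featureEnabled {
		return false, nil
	}
	// In the real code this is where NewPVCProtectionController(...).Run(...)
	// is launched on a goroutine with the shared informers and client.
	return true, nil
}

func main() {
	controllers := map[string]initFunc{
		"pvc-protection": startPVCProtectionControllerSketch,
	}
	for name, start := range controllers {
		started, err := start(true) // pretend the PVCProtection gate is on
		fmt.Printf("%s: started=%v err=%v\n", name, started, err)
	}
}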
@@ -30,12 +30,6 @@ import (
   // Volume plugins
   "github.com/golang/glog"
   "k8s.io/kubernetes/pkg/cloudprovider"
-  "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
-  "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
-  "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
-  "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
-  "k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
-  "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
   "k8s.io/kubernetes/pkg/volume"
   "k8s.io/kubernetes/pkg/volume/aws_ebs"
   "k8s.io/kubernetes/pkg/volume/azure_dd"

@@ -165,22 +159,12 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componen
   allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
   allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)

-  if cloud != nil {
-    switch {
-    case aws.ProviderName == cloud.ProviderName():
-      allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
-    case gce.ProviderName == cloud.ProviderName():
-      allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
-    case openstack.ProviderName == cloud.ProviderName():
-      allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
-    case vsphere.ProviderName == cloud.ProviderName():
-      allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
-    case azure.CloudProviderName == cloud.ProviderName():
-      allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
-    case photon.ProviderName == cloud.ProviderName():
-      allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)
-    }
-  }
+  allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
+  allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
+  allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
+  allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
+  allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...)
+  allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...)

   return allPlugins
 }
@@ -218,7 +218,7 @@ func (o *Options) Run() error {
     return o.writeConfigFile()
   }

-  proxyServer, err := NewProxyServer(o.config, o.CleanupAndExit, o.CleanupIPVS, o.scheme, o.master)
+  proxyServer, err := NewProxyServer(o)
   if err != nil {
     return err
   }

@@ -54,7 +54,17 @@ import (
 )

 // NewProxyServer returns a new ProxyServer.
-func NewProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExit bool, cleanupIPVS bool, scheme *runtime.Scheme, master string) (*ProxyServer, error) {
+func NewProxyServer(o *Options) (*ProxyServer, error) {
+  return newProxyServer(o.config, o.CleanupAndExit, o.CleanupIPVS, o.scheme, o.master)
+}
+
+func newProxyServer(
+  config *proxyconfigapi.KubeProxyConfiguration,
+  cleanupAndExit bool,
+  cleanupIPVS bool,
+  scheme *runtime.Scheme,
+  master string) (*ProxyServer, error) {
+
   if config == nil {
     return nil, errors.New("config is required")
   }
@@ -162,7 +162,7 @@ func TestProxyServerWithCleanupAndExit(t *testing.T) {
   }
   options.CleanupAndExit = true

-  proxyserver, err := NewProxyServer(options.config, options.CleanupAndExit, options.CleanupIPVS, options.scheme, options.master)
+  proxyserver, err := NewProxyServer(options)

   assert.Nil(t, err, "unexpected error in NewProxyServer, addr: %s", addr)
   assert.NotNil(t, proxyserver, "nil proxy server obj, addr: %s", addr)

@@ -46,7 +46,11 @@ import (
 )

 // NewProxyServer returns a new ProxyServer.
-func NewProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExit bool, scheme *runtime.Scheme, master string) (*ProxyServer, error) {
+func NewProxyServer(o *Options) (*ProxyServer, error) {
+  return newProxyServer(o.config, o.CleanupAndExit, o.scheme, o.master)
+}
+
+func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExit bool, scheme *runtime.Scheme, master string) (*ProxyServer, error) {
   if config == nil {
     return nil, errors.New("config is required")
   }
@@ -331,25 +331,22 @@ func ValidateAPIEndpoint(c *kubeadm.MasterConfiguration, fldPath *field.Path) fi
   return allErrs
 }

-// ValidateIgnoreChecksErrors validates duplicates in ignore-checks-errors flag.
-func ValidateIgnoreChecksErrors(ignoreChecksErrors []string, skipPreflightChecks bool) (sets.String, error) {
+// ValidateIgnorePreflightErrors validates duplicates in ignore-preflight-errors flag.
+func ValidateIgnorePreflightErrors(ignorePreflightErrors []string, skipPreflightChecks bool) (sets.String, error) {
   ignoreErrors := sets.NewString()
   allErrs := field.ErrorList{}

-  for _, item := range ignoreChecksErrors {
+  for _, item := range ignorePreflightErrors {
     ignoreErrors.Insert(strings.ToLower(item)) // parameters are case insensitive
   }

   // TODO: remove once deprecated flag --skip-preflight-checks is removed.
   if skipPreflightChecks {
-    if ignoreErrors.Has("all") {
-      allErrs = append(allErrs, field.Invalid(field.NewPath("ignore-checks-errors"), strings.Join(ignoreErrors.List(), ","), "'all' is used together with deprecated flag --skip-preflight-checks. Remove deprecated flag"))
-    }
     ignoreErrors.Insert("all")
   }

   if ignoreErrors.Has("all") && ignoreErrors.Len() > 1 {
-    allErrs = append(allErrs, field.Invalid(field.NewPath("ignore-checks-errors"), strings.Join(ignoreErrors.List(), ","), "don't specify individual checks if 'all' is used"))
+    allErrs = append(allErrs, field.Invalid(field.NewPath("ignore-preflight-errors"), strings.Join(ignoreErrors.List(), ","), "don't specify individual checks if 'all' is used"))
   }

   return ignoreErrors, allErrs.ToAggregate()
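Aside: the renamed helper folds the deprecated --skip-preflight-checks flag into the new --ignore-preflight-errors set and only rejects mixing "all" with individual check names. The following is a simplified sketch of that folding logic using a plain map instead of kubeadm's sets.String and field.ErrorList; it is illustrative, not the exact implementation.

package main

import (
	"fmt"
	"strings"
)

// validateIgnorePreflightErrors lower-cases the requested checks, treats the
// deprecated --skip-preflight-checks flag as "ignore all", and rejects mixing
// "all" with individual check names.
func validateIgnorePreflightErrors(ignorePreflightErrors []string, skipPreflightChecks bool) (map[string]bool, error) {
	ignore := map[string]bool{}
	for _, item := range ignorePreflightErrors {
		ignore[strings.ToLower(item)] = true // parameters are case insensitive
	}
	if skipPreflightChecks {
		ignore["all"] = true
	}
	if ignore["all"] && len(ignore) > 1 {
		return ignore, fmt.Errorf("don't specify individual checks if 'all' is used")
	}
	return ignore, nil
}

func main() {
	set, err := validateIgnorePreflightErrors([]string{"IsPrivilegedUser", "Swap"}, false)
	fmt.Println(set, err)
}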
@@ -459,12 +459,12 @@ func TestValidateFeatureGates(t *testing.T) {
   }
 }

-func TestValidateIgnoreChecksErrors(t *testing.T) {
+func TestValidateIgnorePreflightErrors(t *testing.T) {
   var tests = []struct {
-    ignoreChecksErrors    []string
+    ignorePreflightErrors []string
     skipPreflightChecks   bool
     expectedLen           int
     expectedError         bool
   }{
     {[]string{}, false, 0, false}, // empty list, no old skip-preflight-checks
     {[]string{}, true, 1, false},  // empty list, old skip-preflight-checks

@@ -473,17 +473,17 @@ func TestValidateIgnoreChecksErrors(t *testing.T) {
     {[]string{"check1", "check2", "check1"}, false, 2, false}, // duplicates
     {[]string{"check1", "check2", "all"}, false, 3, true},     // non-duplicate, but 'all' present together wth individual checks
     {[]string{"all"}, false, 1, false},                        // skip all checks by using new flag
-    {[]string{"all"}, true, 1, true},                          // skip all checks by using both old and new flags at the same time
+    {[]string{"all"}, true, 1, false},                         // skip all checks by using both old and new flags at the same time
   }
   for _, rt := range tests {
-    result, err := ValidateIgnoreChecksErrors(rt.ignoreChecksErrors, rt.skipPreflightChecks)
+    result, err := ValidateIgnorePreflightErrors(rt.ignorePreflightErrors, rt.skipPreflightChecks)
     switch {
     case err != nil && !rt.expectedError:
-      t.Errorf("ValidateIgnoreChecksErrors: unexpected error for input (%s, %v), error: %v", rt.ignoreChecksErrors, rt.skipPreflightChecks, err)
+      t.Errorf("ValidateIgnorePreflightErrors: unexpected error for input (%s, %v), error: %v", rt.ignorePreflightErrors, rt.skipPreflightChecks, err)
     case err == nil && rt.expectedError:
-      t.Errorf("ValidateIgnoreChecksErrors: expected error for input (%s, %v) but got: %v", rt.ignoreChecksErrors, rt.skipPreflightChecks, result)
+      t.Errorf("ValidateIgnorePreflightErrors: expected error for input (%s, %v) but got: %v", rt.ignorePreflightErrors, rt.skipPreflightChecks, result)
     case result.Len() != rt.expectedLen:
-      t.Errorf("ValidateIgnoreChecksErrors: expected Len = %d for input (%s, %v) but got: %v, %v", rt.expectedLen, rt.ignoreChecksErrors, rt.skipPreflightChecks, result.Len(), result)
+      t.Errorf("ValidateIgnorePreflightErrors: expected Len = %d for input (%s, %v) but got: %v, %v", rt.expectedLen, rt.ignorePreflightErrors, rt.skipPreflightChecks, result.Len(), result)
     }
   }
 }
@@ -113,7 +113,7 @@ func NewCmdInit(out io.Writer) *cobra.Command {
   var dryRun bool
   var featureGatesString string
   var criSocket string
-  var ignoreChecksErrors []string
+  var ignorePreflightErrors []string

   cmd := &cobra.Command{
     Use: "init",

@@ -128,10 +128,10 @@ func NewCmdInit(out io.Writer) *cobra.Command {
       internalcfg := &kubeadmapi.MasterConfiguration{}
       legacyscheme.Scheme.Convert(cfg, internalcfg, nil)

-      ignoreChecksErrorsSet, err := validation.ValidateIgnoreChecksErrors(ignoreChecksErrors, skipPreFlight)
+      ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(ignorePreflightErrors, skipPreFlight)
       kubeadmutil.CheckErr(err)

-      i, err := NewInit(cfgPath, internalcfg, ignoreChecksErrorsSet, skipTokenPrint, dryRun, criSocket)
+      i, err := NewInit(cfgPath, internalcfg, ignorePreflightErrorsSet, skipTokenPrint, dryRun, criSocket)
       kubeadmutil.CheckErr(err)
       kubeadmutil.CheckErr(i.Validate(cmd))
       kubeadmutil.CheckErr(i.Run(out))

@@ -139,7 +139,7 @@ func NewCmdInit(out io.Writer) *cobra.Command {
   }

   AddInitConfigFlags(cmd.PersistentFlags(), cfg, &featureGatesString)
-  AddInitOtherFlags(cmd.PersistentFlags(), &cfgPath, &skipPreFlight, &skipTokenPrint, &dryRun, &criSocket, &ignoreChecksErrors)
+  AddInitOtherFlags(cmd.PersistentFlags(), &cfgPath, &skipPreFlight, &skipTokenPrint, &dryRun, &criSocket, &ignorePreflightErrors)

   return cmd
 }

@@ -195,13 +195,13 @@ func AddInitConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiext.MasterConfigur
 }

 // AddInitOtherFlags adds init flags that are not bound to a configuration file to the given flagset
-func AddInitOtherFlags(flagSet *flag.FlagSet, cfgPath *string, skipPreFlight, skipTokenPrint, dryRun *bool, criSocket *string, ignoreChecksErrors *[]string) {
+func AddInitOtherFlags(flagSet *flag.FlagSet, cfgPath *string, skipPreFlight, skipTokenPrint, dryRun *bool, criSocket *string, ignorePreflightErrors *[]string) {
   flagSet.StringVar(
     cfgPath, "config", *cfgPath,
     "Path to kubeadm config file. WARNING: Usage of a configuration file is experimental.",
   )
   flagSet.StringSliceVar(
-    ignoreChecksErrors, "ignore-checks-errors", *ignoreChecksErrors,
+    ignorePreflightErrors, "ignore-preflight-errors", *ignorePreflightErrors,
     "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.",
   )
   // Note: All flags that are not bound to the cfg object should be whitelisted in cmd/kubeadm/app/apis/kubeadm/validation/validation.go

@@ -209,7 +209,7 @@ func AddInitOtherFlags(flagSet *flag.FlagSet, cfgPath *string, skipPreFlight, sk
     skipPreFlight, "skip-preflight-checks", *skipPreFlight,
     "Skip preflight checks which normally run before modifying the system.",
   )
-  flagSet.MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-checks-errors=all")
+  flagSet.MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-preflight-errors=all")
   // Note: All flags that are not bound to the cfg object should be whitelisted in cmd/kubeadm/app/apis/kubeadm/validation/validation.go
   flagSet.BoolVar(
     skipTokenPrint, "skip-token-print", *skipTokenPrint,

@@ -227,7 +227,7 @@ func AddInitOtherFlags(flagSet *flag.FlagSet, cfgPath *string, skipPreFlight, sk
 }

 // NewInit validates given arguments and instantiates Init struct with provided information.
-func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, ignoreChecksErrors sets.String, skipTokenPrint, dryRun bool, criSocket string) (*Init, error) {
+func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, ignorePreflightErrors sets.String, skipTokenPrint, dryRun bool, criSocket string) (*Init, error) {
   fmt.Println("[kubeadm] WARNING: kubeadm is currently in beta")

   if cfgPath != "" {

@@ -261,12 +261,12 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, ignoreChecksEr

   fmt.Println("[preflight] Running pre-flight checks.")

-  if err := preflight.RunInitMasterChecks(utilsexec.New(), cfg, criSocket, ignoreChecksErrors); err != nil {
+  if err := preflight.RunInitMasterChecks(utilsexec.New(), cfg, criSocket, ignorePreflightErrors); err != nil {
     return nil, err
   }

   // Try to start the kubelet service in case it's inactive
-  preflight.TryStartKubelet(ignoreChecksErrors)
+  preflight.TryStartKubelet(ignorePreflightErrors)

   return &Init{cfg: cfg, skipTokenPrint: skipTokenPrint, dryRun: dryRun}, nil
 }
@@ -108,7 +108,7 @@ func NewCmdJoin(out io.Writer) *cobra.Command {
   var cfgPath string
   var criSocket string
   var featureGatesString string
-  var ignoreChecksErrors []string
+  var ignorePreflightErrors []string

   cmd := &cobra.Command{
     Use: "join [flags]",

@@ -126,10 +126,10 @@ func NewCmdJoin(out io.Writer) *cobra.Command {
       internalcfg := &kubeadmapi.NodeConfiguration{}
       legacyscheme.Scheme.Convert(cfg, internalcfg, nil)

-      ignoreChecksErrorsSet, err := validation.ValidateIgnoreChecksErrors(ignoreChecksErrors, skipPreFlight)
+      ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(ignorePreflightErrors, skipPreFlight)
       kubeadmutil.CheckErr(err)

-      j, err := NewJoin(cfgPath, args, internalcfg, ignoreChecksErrorsSet, criSocket)
+      j, err := NewJoin(cfgPath, args, internalcfg, ignorePreflightErrorsSet, criSocket)
       kubeadmutil.CheckErr(err)
       kubeadmutil.CheckErr(j.Validate(cmd))
       kubeadmutil.CheckErr(j.Run(out))

@@ -137,7 +137,7 @@ func NewCmdJoin(out io.Writer) *cobra.Command {
   }

   AddJoinConfigFlags(cmd.PersistentFlags(), cfg, &featureGatesString)
-  AddJoinOtherFlags(cmd.PersistentFlags(), &cfgPath, &skipPreFlight, &criSocket, &ignoreChecksErrors)
+  AddJoinOtherFlags(cmd.PersistentFlags(), &cfgPath, &skipPreFlight, &criSocket, &ignorePreflightErrors)

   return cmd
 }

@@ -172,20 +172,20 @@ func AddJoinConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiext.NodeConfigurat
 }

 // AddJoinOtherFlags adds join flags that are not bound to a configuration file to the given flagset
-func AddJoinOtherFlags(flagSet *flag.FlagSet, cfgPath *string, skipPreFlight *bool, criSocket *string, ignoreChecksErrors *[]string) {
+func AddJoinOtherFlags(flagSet *flag.FlagSet, cfgPath *string, skipPreFlight *bool, criSocket *string, ignorePreflightErrors *[]string) {
   flagSet.StringVar(
     cfgPath, "config", *cfgPath,
     "Path to kubeadm config file.")

   flagSet.StringSliceVar(
-    ignoreChecksErrors, "ignore-checks-errors", *ignoreChecksErrors,
+    ignorePreflightErrors, "ignore-preflight-errors", *ignorePreflightErrors,
     "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.",
   )
   flagSet.BoolVar(
     skipPreFlight, "skip-preflight-checks", false,
     "Skip preflight checks which normally run before modifying the system.",
   )
-  flagSet.MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-checks-errors=all")
+  flagSet.MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-preflight-errors=all")
   flagSet.StringVar(
     criSocket, "cri-socket", "/var/run/dockershim.sock",
     `Specify the CRI socket to connect to.`,

@@ -198,7 +198,7 @@ type Join struct {
 }

 // NewJoin instantiates Join struct with given arguments
-func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, ignoreChecksErrors sets.String, criSocket string) (*Join, error) {
+func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, ignorePreflightErrors sets.String, criSocket string) (*Join, error) {
   fmt.Println("[kubeadm] WARNING: kubeadm is currently in beta")

   if cfg.NodeName == "" {

@@ -218,12 +218,12 @@ func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, i
   fmt.Println("[preflight] Running pre-flight checks.")

   // Then continue with the others...
-  if err := preflight.RunJoinNodeChecks(utilsexec.New(), cfg, criSocket, ignoreChecksErrors); err != nil {
+  if err := preflight.RunJoinNodeChecks(utilsexec.New(), cfg, criSocket, ignorePreflightErrors); err != nil {
     return nil, err
   }

   // Try to start the kubelet service in case it's inactive
-  preflight.TryStartKubelet(ignoreChecksErrors)
+  preflight.TryStartKubelet(ignorePreflightErrors)

   return &Join{cfg: cfg}, nil
 }
@@ -47,30 +47,30 @@ func NewCmdReset(out io.Writer) *cobra.Command {
   var skipPreFlight bool
   var certsDir string
   var criSocketPath string
-  var ignoreChecksErrors []string
+  var ignorePreflightErrors []string

   cmd := &cobra.Command{
     Use:   "reset",
     Short: "Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join'.",
     Run: func(cmd *cobra.Command, args []string) {
-      ignoreChecksErrorsSet, err := validation.ValidateIgnoreChecksErrors(ignoreChecksErrors, skipPreFlight)
+      ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(ignorePreflightErrors, skipPreFlight)
       kubeadmutil.CheckErr(err)

-      r, err := NewReset(ignoreChecksErrorsSet, certsDir, criSocketPath)
+      r, err := NewReset(ignorePreflightErrorsSet, certsDir, criSocketPath)
       kubeadmutil.CheckErr(err)
       kubeadmutil.CheckErr(r.Run(out))
     },
   }

   cmd.PersistentFlags().StringSliceVar(
-    &ignoreChecksErrors, "ignore-checks-errors", ignoreChecksErrors,
+    &ignorePreflightErrors, "ignore-preflight-errors", ignorePreflightErrors,
     "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.",
   )
   cmd.PersistentFlags().BoolVar(
     &skipPreFlight, "skip-preflight-checks", false,
     "Skip preflight checks which normally run before modifying the system.",
   )
-  cmd.PersistentFlags().MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-checks-errors=all")
+  cmd.PersistentFlags().MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-preflight-errors=all")

   cmd.PersistentFlags().StringVar(
     &certsDir, "cert-dir", kubeadmapiext.DefaultCertificatesDir,

@@ -92,10 +92,10 @@ type Reset struct {
 }

 // NewReset instantiate Reset struct
-func NewReset(ignoreChecksErrors sets.String, certsDir, criSocketPath string) (*Reset, error) {
+func NewReset(ignorePreflightErrors sets.String, certsDir, criSocketPath string) (*Reset, error) {
   fmt.Println("[preflight] Running pre-flight checks.")

-  if err := preflight.RunRootCheckOnly(ignoreChecksErrors); err != nil {
+  if err := preflight.RunRootCheckOnly(ignorePreflightErrors); err != nil {
     return nil, err
   }

@@ -72,11 +72,11 @@ func NewCmdApply(parentFlags *cmdUpgradeFlags) *cobra.Command {
     Short: "Upgrade your Kubernetes cluster to the specified version.",
     Run: func(cmd *cobra.Command, args []string) {
       var err error
-      flags.parent.ignoreChecksErrorsSet, err = validation.ValidateIgnoreChecksErrors(flags.parent.ignoreChecksErrors, flags.parent.skipPreFlight)
+      flags.parent.ignorePreflightErrorsSet, err = validation.ValidateIgnorePreflightErrors(flags.parent.ignorePreflightErrors, flags.parent.skipPreFlight)
       kubeadmutil.CheckErr(err)

       // Ensure the user is root
-      err = runPreflightChecks(flags.parent.ignoreChecksErrorsSet)
+      err = runPreflightChecks(flags.parent.ignorePreflightErrorsSet)
       kubeadmutil.CheckErr(err)

       err = cmdutil.ValidateExactArgNumber(args, []string{"version"})

@@ -107,9 +107,9 @@ func printConfiguration(cfg *kubeadmapiext.MasterConfiguration, w io.Writer) {
 }

 // runPreflightChecks runs the root preflight check
-func runPreflightChecks(ignoreChecksErrors sets.String) error {
+func runPreflightChecks(ignorePreflightErrors sets.String) error {
   fmt.Println("[preflight] Running pre-flight checks.")
-  return preflight.RunRootCheckOnly(ignoreChecksErrors)
+  return preflight.RunRootCheckOnly(ignorePreflightErrors)
 }

 // getClient gets a real or fake client depending on whether the user is dry-running or not

@@ -38,10 +38,10 @@ func NewCmdPlan(parentFlags *cmdUpgradeFlags) *cobra.Command {
     Short: "Check which versions are available to upgrade to and validate whether your current cluster is upgradeable.",
     Run: func(_ *cobra.Command, _ []string) {
       var err error
-      parentFlags.ignoreChecksErrorsSet, err = validation.ValidateIgnoreChecksErrors(parentFlags.ignoreChecksErrors, parentFlags.skipPreFlight)
+      parentFlags.ignorePreflightErrorsSet, err = validation.ValidateIgnorePreflightErrors(parentFlags.ignorePreflightErrors, parentFlags.skipPreFlight)
       kubeadmutil.CheckErr(err)
       // Ensure the user is root
-      err = runPreflightChecks(parentFlags.ignoreChecksErrorsSet)
+      err = runPreflightChecks(parentFlags.ignorePreflightErrorsSet)
       kubeadmutil.CheckErr(err)

       err = RunPlan(parentFlags)
@@ -35,8 +35,8 @@ type cmdUpgradeFlags struct {
   allowRCUpgrades          bool
   printConfig              bool
   skipPreFlight            bool
-  ignoreChecksErrors       []string
-  ignoreChecksErrorsSet    sets.String
+  ignorePreflightErrors    []string
+  ignorePreflightErrorsSet sets.String
 }

 // NewCmdUpgrade returns the cobra command for `kubeadm upgrade`

@@ -49,7 +49,7 @@ func NewCmdUpgrade(out io.Writer) *cobra.Command {
   allowRCUpgrades:          false,
   printConfig:              false,
   skipPreFlight:            false,
-  ignoreChecksErrorsSet:    sets.NewString(),
+  ignorePreflightErrorsSet: sets.NewString(),
 }

 cmd := &cobra.Command{

@@ -63,9 +63,9 @@ func NewCmdUpgrade(out io.Writer) *cobra.Command {
 cmd.PersistentFlags().BoolVar(&flags.allowExperimentalUpgrades, "allow-experimental-upgrades", flags.allowExperimentalUpgrades, "Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.")
 cmd.PersistentFlags().BoolVar(&flags.allowRCUpgrades, "allow-release-candidate-upgrades", flags.allowRCUpgrades, "Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.")
 cmd.PersistentFlags().BoolVar(&flags.printConfig, "print-config", flags.printConfig, "Specifies whether the configuration file that will be used in the upgrade should be printed or not.")
-cmd.PersistentFlags().StringSliceVar(&flags.ignoreChecksErrors, "ignore-checks-errors", flags.ignoreChecksErrors, "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.")
+cmd.PersistentFlags().StringSliceVar(&flags.ignorePreflightErrors, "ignore-preflight-errors", flags.ignorePreflightErrors, "A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.")
 cmd.PersistentFlags().BoolVar(&flags.skipPreFlight, "skip-preflight-checks", flags.skipPreFlight, "Skip preflight checks that normally run before modifying the system.")
-cmd.PersistentFlags().MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-checks-errors=all")
+cmd.PersistentFlags().MarkDeprecated("skip-preflight-checks", "it is now equivalent to --ignore-preflight-errors=all")
 cmd.PersistentFlags().StringVar(&flags.featureGatesString, "feature-gates", flags.featureGatesString, "A set of key=value pairs that describe feature gates for various features."+
   "Options are:\n"+strings.Join(features.KnownFeatures(&features.InitFeatureGates), "\n"))

@@ -236,7 +236,7 @@ var (
     SupportedEtcdVersion = map[uint8]string{
         8: "3.0.17",
         9: "3.1.10",
-        10: "3.1.11",
+        10: "3.1.10",
     }
 )

@@ -137,7 +137,7 @@ func TestEtcdSupportedVersion(t *testing.T) {
         },
         {
             kubernetesVersion: "1.10.0",
-            expectedVersion: version.MustParseSemantic("3.1.11"),
+            expectedVersion: version.MustParseSemantic("3.1.10"),
             expectedError: nil,
         },
         {

@@ -513,6 +513,15 @@ func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {

     for _, rt := range tests {
         mounts := getHostPathVolumesForTheControlPlane(rt.cfg)

+        // Avoid unit test errors when the flexvolume is mounted
+        if _, ok := mounts.volumes[kubeadmconstants.KubeControllerManager][flexvolumeDirVolumeName]; ok {
+            delete(mounts.volumes[kubeadmconstants.KubeControllerManager], flexvolumeDirVolumeName)
+        }
+        if _, ok := mounts.volumeMounts[kubeadmconstants.KubeControllerManager][flexvolumeDirVolumeName]; ok {
+            delete(mounts.volumeMounts[kubeadmconstants.KubeControllerManager], flexvolumeDirVolumeName)
+        }
+
         if !reflect.DeepEqual(mounts.volumes, rt.vol) {
             t.Errorf(
                 "failed getHostPathVolumesForTheControlPlane:\n\texpected: %v\n\t actual: %v",

@@ -430,7 +430,7 @@ func TestGetAvailableUpgrades(t *testing.T) {
                 KubeVersion: "v1.10.0-alpha.2",
                 KubeadmVersion: "v1.10.0-alpha.2",
                 DNSVersion: "1.14.7",
-                EtcdVersion: "3.1.11",
+                EtcdVersion: "3.1.10",
             },
         },
     },

@@ -75,7 +75,7 @@ type Error struct {
 }

 func (e *Error) Error() string {
-    return fmt.Sprintf("[preflight] Some fatal errors occurred:\n%s%s", e.Msg, "[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-checks-errors=...`")
+    return fmt.Sprintf("[preflight] Some fatal errors occurred:\n%s%s", e.Msg, "[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`")
 }

 // Checker validates the state of the system to ensure kubeadm will be

@@ -837,9 +837,9 @@ func getEtcdVersionResponse(client *http.Client, url string, target interface{})
 }

 // RunInitMasterChecks executes all individual, applicable to Master node checks.
-func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfiguration, criSocket string, ignoreChecksErrors sets.String) error {
+func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfiguration, criSocket string, ignorePreflightErrors sets.String) error {
     // First, check if we're root separately from the other preflight checks and fail fast
-    if err := RunRootCheckOnly(ignoreChecksErrors); err != nil {
+    if err := RunRootCheckOnly(ignorePreflightErrors); err != nil {
         return err
     }

@@ -929,13 +929,13 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi
             )
         }
     }
-    return RunChecks(checks, os.Stderr, ignoreChecksErrors)
+    return RunChecks(checks, os.Stderr, ignorePreflightErrors)
 }

 // RunJoinNodeChecks executes all individual, applicable to node checks.
-func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfiguration, criSocket string, ignoreChecksErrors sets.String) error {
+func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfiguration, criSocket string, ignorePreflightErrors sets.String) error {
     // First, check if we're root separately from the other preflight checks and fail fast
-    if err := RunRootCheckOnly(ignoreChecksErrors); err != nil {
+    if err := RunRootCheckOnly(ignorePreflightErrors); err != nil {
         return err
     }

@@ -987,21 +987,21 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfigura
             }
         }
     }
-    return RunChecks(checks, os.Stderr, ignoreChecksErrors)
+    return RunChecks(checks, os.Stderr, ignorePreflightErrors)
 }

 // RunRootCheckOnly initializes checks slice of structs and call RunChecks
-func RunRootCheckOnly(ignoreChecksErrors sets.String) error {
+func RunRootCheckOnly(ignorePreflightErrors sets.String) error {
     checks := []Checker{
         IsPrivilegedUserCheck{},
     }

-    return RunChecks(checks, os.Stderr, ignoreChecksErrors)
+    return RunChecks(checks, os.Stderr, ignorePreflightErrors)
 }

 // RunChecks runs each check, displays it's warnings/errors, and once all
 // are processed will exit if any errors occurred.
-func RunChecks(checks []Checker, ww io.Writer, ignoreChecksErrors sets.String) error {
+func RunChecks(checks []Checker, ww io.Writer, ignorePreflightErrors sets.String) error {
     type checkErrors struct {
         Name string
         Errors []error

@@ -1012,7 +1012,7 @@ func RunChecks(checks []Checker, ww io.Writer, ignoreChecksErrors sets.String) e
         name := c.Name()
         warnings, errs := c.Check()

-        if setHasItemOrAll(ignoreChecksErrors, name) {
+        if setHasItemOrAll(ignorePreflightErrors, name) {
             // Decrease severity of errors to warnings for this check
             warnings = append(warnings, errs...)
             errs = []error{}

@@ -1038,8 +1038,8 @@ func RunChecks(checks []Checker, ww io.Writer, ignoreChecksErrors sets.String) e
 }

 // TryStartKubelet attempts to bring up kubelet service
-func TryStartKubelet(ignoreChecksErrors sets.String) {
-    if setHasItemOrAll(ignoreChecksErrors, "StartKubelet") {
+func TryStartKubelet(ignorePreflightErrors sets.String) {
+    if setHasItemOrAll(ignorePreflightErrors, "StartKubelet") {
         return
     }
     // If we notice that the kubelet service is inactive, try to start it
@@ -619,3 +619,30 @@ func TestKubeletVersionCheck(t *testing.T) {
         }

 }

+func TestSetHasItemOrAll(t *testing.T) {
+    var tests = []struct {
+        ignoreSet sets.String
+        testString string
+        expectedResult bool
+    }{
+        {sets.NewString(), "foo", false},
+        {sets.NewString("all"), "foo", true},
+        {sets.NewString("all", "bar"), "foo", true},
+        {sets.NewString("bar"), "foo", false},
+        {sets.NewString("baz", "foo", "bar"), "foo", true},
+        {sets.NewString("baz", "bar", "foo"), "Foo", true},
+    }
+
+    for _, rt := range tests {
+        result := setHasItemOrAll(rt.ignoreSet, rt.testString)
+        if result != rt.expectedResult {
+            t.Errorf(
+                "setHasItemOrAll: expected: %v actual: %v (arguments: %q, %q)",
+                rt.expectedResult, result,
+                rt.ignoreSet,
+                rt.testString,
+            )
+        }
+    }
+}
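The new TestSetHasItemOrAll above pins down the contract of the helper behind --ignore-preflight-errors: an empty set ignores nothing, the literal "all" entry downgrades every check, and matching on a check name is case-insensitive. A minimal Go sketch that satisfies that table, written here as an assumption for illustration rather than the committed helper itself:

package preflightsketch

import (
    "strings"

    "k8s.io/apimachinery/pkg/util/sets"
)

// setHasItemOrAll reports whether a named preflight check should be downgraded
// to a warning: either the wildcard "all" is present in the ignore set, or the
// lower-cased check name itself is.
func setHasItemOrAll(s sets.String, item string) bool {
    if s.Has("all") || s.Has(strings.ToLower(item)) {
        return true
    }
    return false
}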
@@ -6728,6 +6728,40 @@ Both these may change in the future. Incoming requests are matched against the h
 </tbody>
 </table>

+</div>
+<div class="sect2">
+<h3 id="_v1beta1_allowedflexvolume">v1beta1.AllowedFlexVolume</h3>
+<div class="paragraph">
+<p>AllowedFlexVolume represents a single Flexvolume that is allowed to be used.</p>
+</div>
+<table class="tableblock frame-all grid-all" style="width:100%; ">
+<colgroup>
+<col style="width:20%;">
+<col style="width:20%;">
+<col style="width:20%;">
+<col style="width:20%;">
+<col style="width:20%;">
+</colgroup>
+<thead>
+<tr>
+<th class="tableblock halign-left valign-top">Name</th>
+<th class="tableblock halign-left valign-top">Description</th>
+<th class="tableblock halign-left valign-top">Required</th>
+<th class="tableblock halign-left valign-top">Schema</th>
+<th class="tableblock halign-left valign-top">Default</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td class="tableblock halign-left valign-top"><p class="tableblock">driver</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Driver is the name of the Flexvolume driver.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">string</p></td>
+<td class="tableblock halign-left valign-top"></td>
+</tr>
+</tbody>
+</table>
+
 </div>
 <div class="sect2">
 <h3 id="_v1_apiresource">v1.APIResource</h3>

@@ -7972,6 +8006,13 @@ Both these may change in the future. Incoming requests are matched against the h
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1beta1_allowedhostpath">v1beta1.AllowedHostPath</a> array</p></td>
 <td class="tableblock halign-left valign-top"></td>
 </tr>
+<tr>
+<td class="tableblock halign-left valign-top"><p class="tableblock">allowedFlexVolumes</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the "Volumes" field.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1beta1_allowedflexvolume">v1beta1.AllowedFlexVolume</a> array</p></td>
+<td class="tableblock halign-left valign-top"></td>
+</tr>
 </tbody>
 </table>

@@ -22,6 +22,7 @@
 # source code.
 # KUBE_GIT_TREE_STATE - "clean" indicates no changes since the git commit id
 #                       "dirty" indicates source code changes after the git commit id
+#                       "archive" indicates the tree was produced by 'git archive'
 # KUBE_GIT_VERSION - "vX.Y" used to indicate the last release version.
 # KUBE_GIT_MAJOR - The major part of the version
 # KUBE_GIT_MINOR - The minor component of the version

@@ -40,7 +41,7 @@ kube::version::get_version_vars() {
   # we likely don't have a git tree, but these magic values may be filled in.
   if [[ '$Format:%%$' == "%" ]]; then
     KUBE_GIT_COMMIT='$Format:%H$'
-    KUBE_GIT_TREE_STATE="git archive"
+    KUBE_GIT_TREE_STATE="archive"
     # When a 'git archive' is exported, the '$Format:%D$' below will look
     # something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: '
     # can be extracted from it.

@@ -860,6 +860,11 @@ type PodSecurityPolicySpec struct {
     // AllowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.
     // +optional
     AllowedHostPaths []AllowedHostPath
+    // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
+    // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
+    // is allowed in the "Volumes" field.
+    // +optional
+    AllowedFlexVolumes []AllowedFlexVolume
 }

 // AllowedHostPath defines the host volume conditions that will be enabled by a policy

@@ -923,6 +928,12 @@ var (
     All FSType = "*"
 )

+// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+type AllowedFlexVolume struct {
+    // Driver is the name of the Flexvolume driver.
+    Driver string
+}
+
 // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
 type SELinuxStrategyOptions struct {
     // Rule is the strategy that will dictate the allowable labels that may be set.
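For orientation only, here is roughly what the new whitelist looks like when built against the mirrored k8s.io/api/extensions/v1beta1 types. The driver names are invented and every non-Flexvolume field of the spec is omitted, so treat it as a hedged sketch rather than a complete, admissible policy:

package main

import (
    "fmt"

    extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // Allow generic flexVolume usage, but only for two whitelisted drivers.
    psp := extensionsv1beta1.PodSecurityPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "flex-whitelist"},
        Spec: extensionsv1beta1.PodSecurityPolicySpec{
            Volumes: []extensionsv1beta1.FSType{extensionsv1beta1.FlexVolume},
            AllowedFlexVolumes: []extensionsv1beta1.AllowedFlexVolume{
                {Driver: "example/lvm"},
                {Driver: "example/cifs"},
            },
        },
    }
    fmt.Println(psp.Name, len(psp.Spec.AllowedFlexVolumes))
}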
@@ -41,6 +41,8 @@ func init() {
 // Public to allow building arbitrary schemes.
 func RegisterConversions(scheme *runtime.Scheme) error {
     return scheme.AddGeneratedConversionFuncs(
+        Convert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume,
+        Convert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume,
         Convert_v1beta1_AllowedHostPath_To_extensions_AllowedHostPath,
         Convert_extensions_AllowedHostPath_To_v1beta1_AllowedHostPath,
         Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus,

@@ -140,6 +142,26 @@ func RegisterConversions(scheme *runtime.Scheme) error {
     )
 }

+func autoConvert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume(in *v1beta1.AllowedFlexVolume, out *extensions.AllowedFlexVolume, s conversion.Scope) error {
+    out.Driver = in.Driver
+    return nil
+}
+
+// Convert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume is an autogenerated conversion function.
+func Convert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume(in *v1beta1.AllowedFlexVolume, out *extensions.AllowedFlexVolume, s conversion.Scope) error {
+    return autoConvert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume(in, out, s)
+}
+
+func autoConvert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume(in *extensions.AllowedFlexVolume, out *v1beta1.AllowedFlexVolume, s conversion.Scope) error {
+    out.Driver = in.Driver
+    return nil
+}
+
+// Convert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume is an autogenerated conversion function.
+func Convert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume(in *extensions.AllowedFlexVolume, out *v1beta1.AllowedFlexVolume, s conversion.Scope) error {
+    return autoConvert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume(in, out, s)
+}
+
 func autoConvert_v1beta1_AllowedHostPath_To_extensions_AllowedHostPath(in *v1beta1.AllowedHostPath, out *extensions.AllowedHostPath, s conversion.Scope) error {
     out.PathPrefix = in.PathPrefix
     return nil

@@ -1072,6 +1094,7 @@ func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySp
         return err
     }
     out.AllowedHostPaths = *(*[]extensions.AllowedHostPath)(unsafe.Pointer(&in.AllowedHostPaths))
+    out.AllowedFlexVolumes = *(*[]extensions.AllowedFlexVolume)(unsafe.Pointer(&in.AllowedFlexVolumes))
     return nil
 }

@@ -1108,6 +1131,7 @@ func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySp
         return err
     }
     out.AllowedHostPaths = *(*[]v1beta1.AllowedHostPath)(unsafe.Pointer(&in.AllowedHostPaths))
+    out.AllowedFlexVolumes = *(*[]v1beta1.AllowedFlexVolume)(unsafe.Pointer(&in.AllowedFlexVolumes))
     return nil
 }

@@ -655,6 +655,7 @@ func ValidatePodSecurityPolicySpec(spec *extensions.PodSecurityPolicySpec, fldPa
     allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...)
     allErrs = append(allErrs, validatePSPDefaultAllowPrivilegeEscalation(fldPath.Child("defaultAllowPrivilegeEscalation"), spec.DefaultAllowPrivilegeEscalation, spec.AllowPrivilegeEscalation)...)
     allErrs = append(allErrs, validatePSPAllowedHostPaths(fldPath.Child("allowedHostPaths"), spec.AllowedHostPaths)...)
+    allErrs = append(allErrs, validatePSPAllowedFlexVolumes(fldPath.Child("allowedFlexVolumes"), spec.AllowedFlexVolumes)...)

     return allErrs
 }

@@ -721,6 +722,20 @@ func validatePSPAllowedHostPaths(fldPath *field.Path, allowedHostPaths []extensi
     return allErrs
 }

+// validatePSPAllowedFlexVolumes
+func validatePSPAllowedFlexVolumes(fldPath *field.Path, flexVolumes []extensions.AllowedFlexVolume) field.ErrorList {
+    allErrs := field.ErrorList{}
+    if len(flexVolumes) > 0 {
+        for idx, fv := range flexVolumes {
+            if len(fv.Driver) == 0 {
+                allErrs = append(allErrs, field.Required(fldPath.Child("allowedFlexVolumes").Index(idx).Child("driver"),
+                    "must specify a driver"))
+            }
+        }
+    }
+    return allErrs
+}
+
 // validatePSPSELinux validates the SELinux fields of PodSecurityPolicy.
 func validatePSPSELinux(fldPath *field.Path, seLinux *extensions.SELinuxStrategyOptions) field.ErrorList {
     allErrs := field.ErrorList{}

@@ -802,7 +817,6 @@ func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []extensions.
             allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List()))
         }
     }
-
     return allErrs
 }

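A hedged sketch of how the new check surfaces to callers of this tree's validation package: a whitelist entry with an empty driver yields a field.Required error on allowedFlexVolumes[idx].driver. The spec below is deliberately bare, so it also fails the unrelated strategy checks; the function name and signature come from the hunk above, everything else is assumed:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/kubernetes/pkg/apis/extensions"
    "k8s.io/kubernetes/pkg/apis/extensions/validation"
)

func main() {
    spec := extensions.PodSecurityPolicySpec{
        Volumes:            []extensions.FSType{extensions.FlexVolume},
        AllowedFlexVolumes: []extensions.AllowedFlexVolume{{Driver: ""}},
    }
    errs := validation.ValidatePodSecurityPolicySpec(&spec, field.NewPath("spec"))
    // Among the returned errors is the new one: spec.allowedFlexVolumes[0].driver "must specify a driver".
    fmt.Println(len(errs) > 0)
}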
@@ -2450,6 +2450,13 @@ func TestValidatePodSecurityPolicy(t *testing.T) {
     pe := true
     invalidDefaultAllowPrivilegeEscalation.Spec.DefaultAllowPrivilegeEscalation = &pe

+    emptyFlexDriver := validPSP()
+    emptyFlexDriver.Spec.Volumes = []extensions.FSType{extensions.FlexVolume}
+    emptyFlexDriver.Spec.AllowedFlexVolumes = []extensions.AllowedFlexVolume{{}}
+
+    nonEmptyFlexVolumes := validPSP()
+    nonEmptyFlexVolumes.Spec.AllowedFlexVolumes = []extensions.AllowedFlexVolume{{Driver: "example/driver"}}
+
     type testCase struct {
         psp *extensions.PodSecurityPolicy
         errorType field.ErrorType

@@ -2581,6 +2588,11 @@ func TestValidatePodSecurityPolicy(t *testing.T) {
             errorType: field.ErrorTypeInvalid,
             errorDetail: "must not contain '..'",
         },
+        "empty flex volume driver": {
+            psp: emptyFlexDriver,
+            errorType: field.ErrorTypeRequired,
+            errorDetail: "must specify a driver",
+        },
     }

     for k, v := range errorCases {

@@ -2660,6 +2672,17 @@ func TestValidatePodSecurityPolicy(t *testing.T) {
     validDefaultAllowPrivilegeEscalation.Spec.DefaultAllowPrivilegeEscalation = &pe
     validDefaultAllowPrivilegeEscalation.Spec.AllowPrivilegeEscalation = true

+    flexvolumeWhenFlexVolumesAllowed := validPSP()
+    flexvolumeWhenFlexVolumesAllowed.Spec.Volumes = []extensions.FSType{extensions.FlexVolume}
+    flexvolumeWhenFlexVolumesAllowed.Spec.AllowedFlexVolumes = []extensions.AllowedFlexVolume{
+        {Driver: "example/driver1"},
+    }
+
+    flexvolumeWhenAllVolumesAllowed := validPSP()
+    flexvolumeWhenAllVolumesAllowed.Spec.Volumes = []extensions.FSType{extensions.All}
+    flexvolumeWhenAllVolumesAllowed.Spec.AllowedFlexVolumes = []extensions.AllowedFlexVolume{
+        {Driver: "example/driver2"},
+    }
     successCases := map[string]struct {
         psp *extensions.PodSecurityPolicy
     }{

@@ -2690,6 +2713,12 @@ func TestValidatePodSecurityPolicy(t *testing.T) {
         "valid defaultAllowPrivilegeEscalation as true": {
             psp: validDefaultAllowPrivilegeEscalation,
         },
+        "allow white-listed flexVolume when flex volumes are allowed": {
+            psp: flexvolumeWhenFlexVolumesAllowed,
+        },
+        "allow white-listed flexVolume when all volumes are allowed": {
+            psp: flexvolumeWhenAllVolumesAllowed,
+        },
     }

     for k, v := range successCases {

pkg/apis/extensions/zz_generated.deepcopy.go (generated, 21 changes)
@@ -26,6 +26,22 @@ import (
     core "k8s.io/kubernetes/pkg/apis/core"
 )

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
+    *out = *in
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
+func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
+    if in == nil {
+        return nil
+    }
+    out := new(AllowedFlexVolume)
+    in.DeepCopyInto(out)
+    return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
     *out = *in

@@ -919,6 +935,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
         *out = make([]AllowedHostPath, len(*in))
         copy(*out, *in)
     }
+    if in.AllowedFlexVolumes != nil {
+        in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
+        *out = make([]AllowedFlexVolume, len(*in))
+        copy(*out, *in)
+    }
     return
 }

@@ -50,6 +50,7 @@ go_library(
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/gopkg.in/gcfg.v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",

@@ -48,6 +48,7 @@ import (
     "path"

     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/sets"

@@ -285,6 +286,10 @@ type EC2 interface {
     // Delete an EBS volume
     DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)

+    ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error)
+
+    DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error)
+
     DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error)

     CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)

@@ -453,6 +458,9 @@ type Volumes interface {

     // Check if disks specified in argument map are still attached to their respective nodes.
     DisksAreAttached(map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error)
+
+    // Expand the disk to new size
+    ResizeDisk(diskName KubernetesVolumeID, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
 }

 // InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups

@@ -833,6 +841,36 @@ func (s *awsSdkEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (*ec2.DeleteVol
     return resp, err
 }

+func (s *awsSdkEC2) ModifyVolume(request *ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) {
+    requestTime := time.Now()
+    resp, err := s.ec2.ModifyVolume(request)
+    timeTaken := time.Since(requestTime).Seconds()
+    recordAwsMetric("modify_volume", timeTaken, err)
+    return resp, err
+}
+
+func (s *awsSdkEC2) DescribeVolumeModifications(request *ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) {
+    requestTime := time.Now()
+    results := []*ec2.VolumeModification{}
+    var nextToken *string
+    for {
+        resp, err := s.ec2.DescribeVolumesModifications(request)
+        if err != nil {
+            recordAwsMetric("describe_volume_modification", 0, err)
+            return nil, fmt.Errorf("error listing volume modifictions : %v", err)
+        }
+        results = append(results, resp.VolumesModifications...)
+        nextToken = resp.NextToken
+        if aws.StringValue(nextToken) == "" {
+            break
+        }
+        request.NextToken = nextToken
+    }
+    timeTaken := time.Since(requestTime).Seconds()
+    recordAwsMetric("describe_volume_modification", timeTaken, nil)
+    return results, nil
+}
+
 func (s *awsSdkEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) {
     // Subnets are not paged
     response, err := s.ec2.DescribeSubnets(request)
@@ -1653,6 +1691,65 @@ func (d *awsDisk) describeVolume() (*ec2.Volume, error) {
     return volumes[0], nil
 }

+func (d *awsDisk) describeVolumeModification() (*ec2.VolumeModification, error) {
+    volumeID := d.awsID
+    request := &ec2.DescribeVolumesModificationsInput{
+        VolumeIds: []*string{volumeID.awsString()},
+    }
+    volumeMods, err := d.ec2.DescribeVolumeModifications(request)
+
+    if err != nil {
+        return nil, fmt.Errorf("error describing volume modification %s with %v", volumeID, err)
+    }
+
+    if len(volumeMods) == 0 {
+        return nil, fmt.Errorf("no volume modifications found for %s", volumeID)
+    }
+    lastIndex := len(volumeMods) - 1
+    return volumeMods[lastIndex], nil
+}
+
+func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) {
+    volumeID := d.awsID
+
+    request := &ec2.ModifyVolumeInput{
+        VolumeId: volumeID.awsString(),
+        Size: aws.Int64(requestGiB),
+    }
+    output, err := d.ec2.ModifyVolume(request)
+    if err != nil {
+        modifyError := fmt.Errorf("AWS modifyVolume failed for %s with %v", volumeID, err)
+        return requestGiB, modifyError
+    }
+
+    volumeModification := output.VolumeModification
+
+    if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateCompleted {
+        return aws.Int64Value(volumeModification.TargetSize), nil
+    }
+
+    backoff := wait.Backoff{
+        Duration: 1 * time.Second,
+        Factor: 2,
+        Steps: 10,
+    }
+
+    checkForResize := func() (bool, error) {
+        volumeModification, err := d.describeVolumeModification()
+
+        if err != nil {
+            return false, err
+        }
+
+        if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateCompleted {
+            return true, nil
+        }
+        return false, nil
+    }
+    waitWithErr := wait.ExponentialBackoff(backoff, checkForResize)
+    return requestGiB, waitWithErr
+}
+
 // applyUnSchedulableTaint applies a unschedulable taint to a node after verifying
 // if node has become unusable because of volumes getting stuck in attaching state.
 func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) {

@@ -2321,6 +2418,37 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume
     return attached, nil
 }

+func (c *Cloud) ResizeDisk(
+    diskName KubernetesVolumeID,
+    oldSize resource.Quantity,
+    newSize resource.Quantity) (resource.Quantity, error) {
+    awsDisk, err := newAWSDisk(c, diskName)
+    if err != nil {
+        return oldSize, err
+    }
+
+    volumeInfo, err := awsDisk.describeVolume()
+    if err != nil {
+        descErr := fmt.Errorf("AWS.ResizeDisk Error describing volume %s with %v", diskName, err)
+        return oldSize, descErr
+    }
+    requestBytes := newSize.Value()
+    // AWS resizes in chunks of GiB (not GB)
+    requestGiB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
+    newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
+
+    // If disk already if of greater or equal size than requested we return
+    if aws.Int64Value(volumeInfo.Size) >= requestGiB {
+        return newSizeQuant, nil
+    }
+    _, err = awsDisk.modifyVolume(requestGiB)
+
+    if err != nil {
+        return oldSize, err
+    }
+    return newSizeQuant, nil
+}
+
 // Gets the current load balancer state
 func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) {
     request := &elb.DescribeLoadBalancersInput{}
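One detail in ResizeDisk above that is easy to miss: EBS modifications are requested in whole GiB, so the requested byte count is rounded up before it is compared with the current volume size. A small self-contained illustration of that arithmetic (the real code uses volume.RoundUpSize; this local helper is assumed to be equivalent):

package main

import "fmt"

// roundUpSize rounds a byte count up to the next whole allocation unit.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
    return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
    const GiB = 1024 * 1024 * 1024
    // A 5 GB (5,000,000,000 byte) request is about 4.66 GiB, so it becomes a 5 GiB modification.
    fmt.Println(roundUpSize(5*1000*1000*1000, GiB)) // 5
    // A request that is already a whole number of GiB is left as is.
    fmt.Println(roundUpSize(8*GiB, GiB)) // 8
}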
@@ -203,6 +203,14 @@ func (ec2i *FakeEC2Impl) RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngr
     panic("Not implemented")
 }

+func (ec2i *FakeEC2Impl) DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) {
+    panic("Not implemented")
+}
+
+func (ec2i *FakeEC2Impl) ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) {
+    panic("Not implemented")
+}
+
 func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOutput, error) {
     ec2i.Subnets = append(ec2i.Subnets, request)
     response := &ec2.CreateSubnetOutput{

@@ -133,6 +133,7 @@ filegroup(
         "//pkg/controller/volume/events:all-srcs",
         "//pkg/controller/volume/expand:all-srcs",
         "//pkg/controller/volume/persistentvolume:all-srcs",
+        "//pkg/controller/volume/pvcprotection:all-srcs",
     ],
     tags = ["automanaged"],
 )

@@ -16,12 +16,15 @@ go_test(
     importpath = "k8s.io/kubernetes/pkg/controller/node/ipam",
     library = ":go_default_library",
     deps = [
+        "//pkg/controller:go_default_library",
         "//pkg/controller/node/ipam/cidrset:go_default_library",
         "//pkg/controller/node/ipam/test:go_default_library",
         "//pkg/controller/testutil:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/client-go/informers:go_default_library",
+        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
     ],
 )

@@ -41,6 +44,7 @@ go_library(
     deps = [
         "//pkg/cloudprovider:go_default_library",
         "//pkg/cloudprovider/providers/gce:go_default_library",
+        "//pkg/controller:go_default_library",
         "//pkg/controller/node/ipam/cidrset:go_default_library",
         "//pkg/controller/node/ipam/sync:go_default_library",
         "//pkg/controller/node/util:go_default_library",

@@ -52,12 +56,14 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
         "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library",

@@ -80,12 +80,12 @@ type CIDRAllocator interface {
     AllocateOrOccupyCIDR(node *v1.Node) error
     // ReleaseCIDR releases the CIDR of the removed node
     ReleaseCIDR(node *v1.Node) error
-    // Register allocator with the nodeInformer for updates.
-    Register(nodeInformer informers.NodeInformer)
+    // Run starts all the working logic of the allocator.
+    Run(stopCh <-chan struct{})
 }

 // New creates a new CIDR range allocator.
-func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, allocatorType CIDRAllocatorType, clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) {
+func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) {
     nodeList, err := listNodes(kubeClient)
     if err != nil {
         return nil, err

@@ -93,9 +93,9 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, allocato

     switch allocatorType {
     case RangeAllocatorType:
-        return NewCIDRRangeAllocator(kubeClient, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList)
+        return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList)
     case CloudAllocatorType:
-        return NewCloudCIDRAllocator(kubeClient, cloud)
+        return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
     default:
         return nil, fmt.Errorf("Invalid CIDR allocator type: %v", allocatorType)
     }
@@ -25,9 +25,10 @@ import (

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
+    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/sets"
-    "k8s.io/apimachinery/pkg/util/wait"
     informers "k8s.io/client-go/informers/core/v1"
+    corelisters "k8s.io/client-go/listers/core/v1"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/record"

@@ -37,6 +38,7 @@ import (
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/kubernetes/pkg/cloudprovider"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+    "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/node/util"
     nodeutil "k8s.io/kubernetes/pkg/util/node"
 )

@@ -49,6 +51,12 @@ type cloudCIDRAllocator struct {
     client clientset.Interface
     cloud *gce.GCECloud

+    // nodeLister is able to list/get nodes and is populated by the shared informer passed to
+    // NewCloudCIDRAllocator.
+    nodeLister corelisters.NodeLister
+    // nodesSynced returns true if the node shared informer has been synced at least once.
+    nodesSynced cache.InformerSynced
+
     // Channel that is used to pass updating Nodes to the background.
     // This increases the throughput of CIDR assignment by parallelization
     // and not blocking on long operations (which shouldn't be done from

@@ -64,7 +72,7 @@ type cloudCIDRAllocator struct {
 var _ CIDRAllocator = (*cloudCIDRAllocator)(nil)

 // NewCloudCIDRAllocator creates a new cloud CIDR allocator.
-func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface) (CIDRAllocator, error) {
+func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) {
     if client == nil {
         glog.Fatalf("kubeClient is nil when starting NodeController")
     }

@@ -84,20 +92,45 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
     ca := &cloudCIDRAllocator{
         client: client,
         cloud: gceCloud,
+        nodeLister: nodeInformer.Lister(),
+        nodesSynced: nodeInformer.Informer().HasSynced,
         nodeUpdateChannel: make(chan string, cidrUpdateQueueSize),
         recorder: recorder,
         nodesInProcessing: sets.NewString(),
     }

-    for i := 0; i < cidrUpdateWorkers; i++ {
-        // TODO: Take stopChan as an argument to NewCloudCIDRAllocator and pass it to the worker.
-        go ca.worker(wait.NeverStop)
-    }
+    nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+        AddFunc: util.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
+        UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+            if newNode.Spec.PodCIDR == "" {
+                return ca.AllocateOrOccupyCIDR(newNode)
+            }
+            return nil
+        }),
+        DeleteFunc: util.CreateDeleteNodeHandler(ca.ReleaseCIDR),
+    })

     glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
     return ca, nil
 }

+func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) {
+    defer utilruntime.HandleCrash()
+
+    glog.Infof("Starting cloud CIDR allocator")
+    defer glog.Infof("Shutting down cloud CIDR allocator")
+
+    if !controller.WaitForCacheSync("cidrallocator", stopCh, ca.nodesSynced) {
+        return
+    }
+
+    for i := 0; i < cidrUpdateWorkers; i++ {
+        go ca.worker(stopCh)
+    }
+
+    <-stopCh
+}
+
 func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) {
     for {
         select {

@@ -169,7 +202,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {

     for rep := 0; rep < cidrUpdateRetries; rep++ {
         // TODO: change it to using PATCH instead of full Node updates.
-        node, err = ca.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+        node, err = ca.nodeLister.Get(nodeName)
         if err != nil {
             glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", nodeName, err)
             continue

@@ -218,16 +251,3 @@ func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error {
         node.Name, node.Spec.PodCIDR)
     return nil
 }
-
-func (ca *cloudCIDRAllocator) Register(nodeInformer informers.NodeInformer) {
-    nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-        AddFunc: util.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
-        UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
-            if newNode.Spec.PodCIDR == "" {
-                return ca.AllocateOrOccupyCIDR(newNode)
-            }
-            return nil
-        }),
-        DeleteFunc: util.CreateDeleteNodeHandler(ca.ReleaseCIDR),
-    })
-}
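To show what the interface change above (Register replaced by Run) means for a caller, here is a hypothetical wiring sketch; the ipam.New signature and the allocator type constant are taken from the hunks in this diff, while the informer factory setup, CIDR values, and mask size are invented for the example:

package main

import (
    "net"
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"

    "k8s.io/kubernetes/pkg/controller/node/ipam"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    factory := informers.NewSharedInformerFactory(client, 30*time.Second)
    nodeInformer := factory.Core().V1().Nodes()

    _, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
    _, serviceCIDR, _ := net.ParseCIDR("10.96.0.0/12")

    stopCh := make(chan struct{})
    // Event handlers are registered inside New(); the caller only starts the workers.
    allocator, err := ipam.New(client, nil, nodeInformer, ipam.RangeAllocatorType, clusterCIDR, serviceCIDR, 24)
    if err != nil {
        panic(err)
    }

    factory.Start(stopCh)    // populate the shared node informer
    go allocator.Run(stopCh) // waits for the cache sync, then runs the CIDR workers
    <-stopCh
}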
@@ -25,16 +25,16 @@ import (
     "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/sets"
-    "k8s.io/apimachinery/pkg/util/wait"
     informers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+    corelisters "k8s.io/client-go/listers/core/v1"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/record"
+    "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
     "k8s.io/kubernetes/pkg/controller/node/util"
 )
@@ -45,6 +45,12 @@ type rangeAllocator struct {
     clusterCIDR *net.IPNet
     maxCIDRs int

+    // nodeLister is able to list/get nodes and is populated by the shared informer passed to
+    // NewCloudCIDRAllocator.
+    nodeLister corelisters.NodeLister
+    // nodesSynced returns true if the node shared informer has been synced at least once.
+    nodesSynced cache.InformerSynced
+
     // Channel that is used to pass updating Nodes with assigned CIDRs to the background
     // This increases a throughput of CIDR assignment by not blocking on long operations.
     nodeCIDRUpdateChannel chan nodeAndCIDR
@@ -59,7 +65,7 @@ type rangeAllocator struct {
 // Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
 // Caller must always pass in a list of existing nodes so the new allocator
 // can initialize its CIDR map. NodeList is only nil in testing.
-func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
+func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
     if client == nil {
         glog.Fatalf("kubeClient is nil when starting NodeController")
     }
@@ -78,6 +84,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s
         client: client,
         cidrs: set,
         clusterCIDR: clusterCIDR,
+        nodeLister: nodeInformer.Lister(),
+        nodesSynced: nodeInformer.Informer().HasSynced,
         nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize),
         recorder: recorder,
         nodesInProcessing: sets.NewString(),
@@ -107,14 +115,57 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s
             }
         }
     }
-    for i := 0; i < cidrUpdateWorkers; i++ {
-        // TODO: Take stopChan as an argument to NewCIDRRangeAllocator and pass it to the worker.
-        go ra.worker(wait.NeverStop)
-    }
+    nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+        AddFunc: util.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
+        UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
+            // If the PodCIDR is not empty we either:
+            // - already processed a Node that already had a CIDR after NC restarted
+            //   (cidr is marked as used),
+            // - already processed a Node successfully and allocated a CIDR for it
+            //   (cidr is marked as used),
+            // - already processed a Node but we did saw a "timeout" response and
+            //   request eventually got through in this case we haven't released
+            //   the allocated CIDR (cidr is still marked as used).
+            // There's a possible error here:
+            // - NC sees a new Node and assigns a CIDR X to it,
+            // - Update Node call fails with a timeout,
+            // - Node is updated by some other component, NC sees an update and
+            //   assigns CIDR Y to the Node,
+            // - Both CIDR X and CIDR Y are marked as used in the local cache,
+            //   even though Node sees only CIDR Y
+            // The problem here is that in in-memory cache we see CIDR X as marked,
+            // which prevents it from being assigned to any new node. The cluster
+            // state is correct.
+            // Restart of NC fixes the issue.
+            if newNode.Spec.PodCIDR == "" {
+                return ra.AllocateOrOccupyCIDR(newNode)
+            }
+            return nil
+        }),
+        DeleteFunc: util.CreateDeleteNodeHandler(ra.ReleaseCIDR),
+    })
+
     return ra, nil
 }

+func (r *rangeAllocator) Run(stopCh <-chan struct{}) {
+    defer utilruntime.HandleCrash()
+
+    glog.Infof("Starting range CIDR allocator")
+    defer glog.Infof("Shutting down range CIDR allocator")
+
+    if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) {
+        return
+    }
+
+    for i := 0; i < cidrUpdateWorkers; i++ {
+        go r.worker(stopCh)
+    }
+
+    <-stopCh
+}
+
 func (r *rangeAllocator) worker(stopChan <-chan struct{}) {
     for {
         select {
@@ -232,7 +283,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
     podCIDR := data.cidr.String()
     for rep := 0; rep < cidrUpdateRetries; rep++ {
         // TODO: change it to using PATCH instead of full Node updates.
-        node, err = r.client.CoreV1().Nodes().Get(data.nodeName, metav1.GetOptions{})
+        node, err = r.nodeLister.Get(data.nodeName)
         if err != nil {
             glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
             continue
@@ -269,35 +320,3 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
     }
     return err
 }
-
-func (r *rangeAllocator) Register(nodeInformer informers.NodeInformer) {
-    nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-        AddFunc: util.CreateAddNodeHandler(r.AllocateOrOccupyCIDR),
-        UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
-            // If the PodCIDR is not empty we either:
-            // - already processed a Node that already had a CIDR after NC restarted
-            //   (cidr is marked as used),
-            // - already processed a Node successfully and allocated a CIDR for it
-            //   (cidr is marked as used),
-            // - already processed a Node but we did saw a "timeout" response and
-            //   request eventually got through in this case we haven't released
-            //   the allocated CIDR (cidr is still marked as used).
-            // There's a possible error here:
-            // - NC sees a new Node and assigns a CIDR X to it,
-            // - Update Node call fails with a timeout,
-            // - Node is updated by some other component, NC sees an update and
-            //   assigns CIDR Y to the Node,
-            // - Both CIDR X and CIDR Y are marked as used in the local cache,
-            //   even though Node sees only CIDR Y
-            // The problem here is that in in-memory cache we see CIDR X as marked,
-            // which prevents it from being assigned to any new node. The cluster
-            // state is correct.
-            // Restart of NC fixes the issue.
-            if newNode.Spec.PodCIDR == "" {
-                return r.AllocateOrOccupyCIDR(newNode)
-            }
-            return nil
-        }),
-        DeleteFunc: util.CreateDeleteNodeHandler(r.ReleaseCIDR),
-    })
-}
@@ -24,7 +24,10 @@ import (
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/client-go/informers"
+    coreinformers "k8s.io/client-go/informers/core/v1"
     "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/testutil"
 )

@@ -32,6 +35,8 @@ const (
     nodePollInterval = 100 * time.Millisecond
 )

+var alwaysReady = func() bool { return true }
+
 func waitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error {
     return wait.Poll(nodePollInterval, timeout, func() (bool, error) {
         if len(nodeHandler.GetUpdatedNodesCopy()) >= number {
@@ -41,6 +46,19 @@ func waitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number
     })
 }

+// Creates a fakeNodeInformer using the provided fakeNodeHandler.
+func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer {
+    fakeClient := &fake.Clientset{}
+    fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
+    fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
+
+    for _, node := range fakeNodeHandler.Existing {
+        fakeNodeInformer.Informer().GetStore().Add(node)
+    }
+
+    return fakeNodeInformer
+}
+
 func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
     testCases := []struct {
         description string
@@ -130,19 +148,23 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
         expectedAllocatedCIDR string
         allocatedCIDRs []string
     }) {
-        allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
+        // Initialize the range allocator.
+        allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
+        rangeAllocator, ok := allocator.(*rangeAllocator)
+        if !ok {
+            t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
+            return
+        }
+        rangeAllocator.nodesSynced = alwaysReady
+        rangeAllocator.recorder = testutil.NewFakeRecorder()
+        go allocator.Run(wait.NeverStop)
+
         // this is a bit of white box testing
         for _, allocated := range tc.allocatedCIDRs {
             _, cidr, err := net.ParseCIDR(allocated)
             if err != nil {
                 t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
             }
-            rangeAllocator, ok := allocator.(*rangeAllocator)
-            if !ok {
-                t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
-                return
-            }
-            rangeAllocator.recorder = testutil.NewFakeRecorder()
             if err = rangeAllocator.cidrs.Occupy(cidr); err != nil {
                 t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
             }
@@ -212,19 +234,23 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
         subNetMaskSize int
         allocatedCIDRs []string
     }) {
-        allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
+        // Initialize the range allocator.
+        allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
+        rangeAllocator, ok := allocator.(*rangeAllocator)
+        if !ok {
+            t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
+            return
+        }
+        rangeAllocator.nodesSynced = alwaysReady
+        rangeAllocator.recorder = testutil.NewFakeRecorder()
+        go allocator.Run(wait.NeverStop)
+
         // this is a bit of white box testing
         for _, allocated := range tc.allocatedCIDRs {
             _, cidr, err := net.ParseCIDR(allocated)
             if err != nil {
                 t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
             }
-            rangeAllocator, ok := allocator.(*rangeAllocator)
-            if !ok {
-                t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
-                return
-            }
-            rangeAllocator.recorder = testutil.NewFakeRecorder()
             err = rangeAllocator.cidrs.Occupy(cidr)
             if err != nil {
                 t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
@@ -324,19 +350,23 @@ func TestReleaseCIDRSuccess(t *testing.T) {
         allocatedCIDRs []string
         cidrsToRelease []string
     }) {
-        allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
+        // Initialize the range allocator.
+        allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.clusterCIDR, tc.serviceCIDR, tc.subNetMaskSize, nil)
+        rangeAllocator, ok := allocator.(*rangeAllocator)
+        if !ok {
+            t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
+            return
+        }
+        rangeAllocator.nodesSynced = alwaysReady
+        rangeAllocator.recorder = testutil.NewFakeRecorder()
+        go allocator.Run(wait.NeverStop)
+
         // this is a bit of white box testing
         for _, allocated := range tc.allocatedCIDRs {
             _, cidr, err := net.ParseCIDR(allocated)
             if err != nil {
                 t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err)
             }
-            rangeAllocator, ok := allocator.(*rangeAllocator)
-            if !ok {
-                t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
-                return
-            }
-            rangeAllocator.recorder = testutil.NewFakeRecorder()
             err = rangeAllocator.cidrs.Occupy(cidr)
             if err != nil {
                 t.Fatalf("%v: unexpected error when occupying CIDR %v: %v", tc.description, allocated, err)
@@ -360,11 +360,10 @@ func NewNodeController(
         } else {
             var err error
             nc.cidrAllocator, err = ipam.New(
-                kubeClient, cloud, nc.allocatorType, nc.clusterCIDR, nc.serviceCIDR, nodeCIDRMaskSize)
+                kubeClient, cloud, nodeInformer, nc.allocatorType, nc.clusterCIDR, nc.serviceCIDR, nodeCIDRMaskSize)
             if err != nil {
                 return nil, err
             }
-            nc.cidrAllocator.Register(nodeInformer)
         }
     }

@@ -585,6 +584,12 @@ func (nc *Controller) Run(stopCh <-chan struct{}) {
         go wait.Until(nc.doEvictionPass, scheduler.NodeEvictionPeriod, wait.NeverStop)
     }

+    if nc.allocateNodeCIDRs {
+        if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
+            go nc.cidrAllocator.Run(wait.NeverStop)
+        }
+    }
+
     <-stopCh
 }

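Note: with this change the CIDR allocator no longer exposes a Register() step; its event handlers are wired up inside the constructor and the node controller drives it through Run(stopCh). The sketch below is a minimal, self-contained Go model of that lifecycle, written for illustration only: plain channels and a ticker stand in for informers and the workqueue, and none of the names are the Kubernetes API.

// Simplified model of the lifecycle above: handlers are "registered" in the
// constructor and the component is driven by Run(stopCh). Illustrative only.
package main

import (
    "fmt"
    "sync"
    "time"
)

type allocator struct {
    mu      sync.Mutex
    pending []string
    synced  func() bool
}

func newAllocator(addEvents <-chan string) *allocator {
    a := &allocator{synced: func() bool { return true }}
    // Registration happens here, inside the constructor, instead of via a
    // separate Register() call from the controller.
    go func() {
        for name := range addEvents {
            a.mu.Lock()
            a.pending = append(a.pending, name)
            a.mu.Unlock()
        }
    }()
    return a
}

func (a *allocator) Run(stopCh <-chan struct{}) {
    // Gate on cache sync before starting workers (stands in for WaitForCacheSync).
    if !a.synced() {
        return
    }
    ticker := time.NewTicker(50 * time.Millisecond)
    defer ticker.Stop()
    for {
        select {
        case <-stopCh:
            return
        case <-ticker.C:
            a.mu.Lock()
            for _, n := range a.pending {
                fmt.Println("allocating CIDR for", n)
            }
            a.pending = nil
            a.mu.Unlock()
        }
    }
}

func main() {
    events := make(chan string, 1)
    a := newAllocator(events)
    stop := make(chan struct{})
    go a.Run(stop)
    events <- "node-1"
    time.Sleep(200 * time.Millisecond)
    close(stop)
}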
pkg/controller/volume/pvcprotection/BUILD (new file, 61 lines)
@@ -0,0 +1,61 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["pvc_protection_controller.go"],
+    importpath = "k8s.io/kubernetes/pkg/controller/volume/pvcprotection",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/controller:go_default_library",
+        "//pkg/util/metrics:go_default_library",
+        "//pkg/volume/util:go_default_library",
+        "//pkg/volume/util/volumehelper:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
+        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
+        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
+        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["pvc_protection_controller_test.go"],
+    importpath = "k8s.io/kubernetes/pkg/controller/volume/pvcprotection",
+    library = ":go_default_library",
+    deps = [
+        "//pkg/controller:go_default_library",
+        "//pkg/volume/util:go_default_library",
+        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//vendor/k8s.io/client-go/informers:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//vendor/k8s.io/client-go/testing:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
pkg/controller/volume/pvcprotection/pvc_protection_controller.go (new file, 284 lines)
@@ -0,0 +1,284 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pvcprotection
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/golang/glog"
+    "k8s.io/api/core/v1"
+    apierrs "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/labels"
+    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/apimachinery/pkg/util/wait"
+    coreinformers "k8s.io/client-go/informers/core/v1"
+    clientset "k8s.io/client-go/kubernetes"
+    corelisters "k8s.io/client-go/listers/core/v1"
+    "k8s.io/client-go/tools/cache"
+    "k8s.io/client-go/util/workqueue"
+    "k8s.io/kubernetes/pkg/controller"
+    "k8s.io/kubernetes/pkg/util/metrics"
+    volumeutil "k8s.io/kubernetes/pkg/volume/util"
+    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
+)
+
+// Controller is controller that removes PVCProtectionFinalizer
+// from PVCs that are used by no pods.
+type Controller struct {
+    client clientset.Interface
+
+    pvcLister corelisters.PersistentVolumeClaimLister
+    pvcListerSynced cache.InformerSynced
+
+    podLister corelisters.PodLister
+    podListerSynced cache.InformerSynced
+
+    queue workqueue.RateLimitingInterface
+}
+
+// NewPVCProtectionController returns a new *PVCProtectionController.
+func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) *Controller {
+    e := &Controller{
+        client: cl,
+        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
+    }
+    if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
+        metrics.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
+    }
+
+    e.pvcLister = pvcInformer.Lister()
+    e.pvcListerSynced = pvcInformer.Informer().HasSynced
+    pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+        AddFunc: e.pvcAddedUpdated,
+        UpdateFunc: func(old, new interface{}) {
+            e.pvcAddedUpdated(new)
+        },
+    })
+
+    e.podLister = podInformer.Lister()
+    e.podListerSynced = podInformer.Informer().HasSynced
+    podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+        AddFunc: func(obj interface{}) {
+            e.podAddedDeletedUpdated(obj, false)
+        },
+        DeleteFunc: func(obj interface{}) {
+            e.podAddedDeletedUpdated(obj, true)
+        },
+        UpdateFunc: func(old, new interface{}) {
+            e.podAddedDeletedUpdated(new, false)
+        },
+    })
+
+    return e
+}
+
+// Run runs the controller goroutines.
+func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
+    defer utilruntime.HandleCrash()
+    defer c.queue.ShutDown()
+
+    glog.Infof("Starting PVC protection controller")
+    defer glog.Infof("Shutting down PVC protection controller")
+
+    if !controller.WaitForCacheSync("PVC protection", stopCh, c.pvcListerSynced, c.podListerSynced) {
+        return
+    }
+
+    for i := 0; i < workers; i++ {
+        go wait.Until(c.runWorker, time.Second, stopCh)
+    }
+
+    <-stopCh
+}
+
+func (c *Controller) runWorker() {
+    for c.processNextWorkItem() {
+    }
+}
+
+// processNextWorkItem deals with one pvcKey off the queue. It returns false when it's time to quit.
+func (c *Controller) processNextWorkItem() bool {
+    pvcKey, quit := c.queue.Get()
+    if quit {
+        return false
+    }
+    defer c.queue.Done(pvcKey)
+
+    pvcNamespace, pvcName, err := cache.SplitMetaNamespaceKey(pvcKey.(string))
+    if err != nil {
+        utilruntime.HandleError(fmt.Errorf("Error parsing PVC key %q: %v", pvcKey, err))
+        return true
+    }
+
+    err = c.processPVC(pvcNamespace, pvcName)
+    if err == nil {
+        c.queue.Forget(pvcKey)
+        return true
+    }
+
+    utilruntime.HandleError(fmt.Errorf("PVC %v failed with: %v", pvcKey, err))
+    c.queue.AddRateLimited(pvcKey)
+
+    return true
+}
+
+func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
+    glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
+    startTime := time.Now()
+    defer func() {
+        glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Now().Sub(startTime))
+    }()
+
+    pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
+    if apierrs.IsNotFound(err) {
+        glog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName)
+        return nil
+    }
+    if err != nil {
+        return err
+    }
+
+    if volumeutil.IsPVCBeingDeleted(pvc) && volumeutil.IsProtectionFinalizerPresent(pvc) {
+        // PVC should be deleted. Check if it's used and remove finalizer if
+        // it's not.
+        isUsed, err := c.isBeingUsed(pvc)
+        if err != nil {
+            return err
+        }
+        if !isUsed {
+            return c.removeFinalizer(pvc)
+        }
+    }
+
+    if !volumeutil.IsPVCBeingDeleted(pvc) && !volumeutil.IsProtectionFinalizerPresent(pvc) {
+        // PVC is not being deleted -> it should have the finalizer. The
+        // finalizer should be added by admission plugin, this is just to add
+        // the finalizer to old PVCs that were created before the admission
+        // plugin was enabled.
+        return c.addFinalizer(pvc)
+    }
+    return nil
+}
+
+func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
+    claimClone := pvc.DeepCopy()
+    volumeutil.AddProtectionFinalizer(claimClone)
+    _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
+    if err != nil {
+        glog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
+        return err
+    }
+    glog.V(3).Infof("Added protection finalizer to PVC %s/%s", pvc.Namespace, pvc.Name)
+    return nil
+}
+
+func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error {
+    claimClone := pvc.DeepCopy()
+    volumeutil.RemoveProtectionFinalizer(claimClone)
+    _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
+    if err != nil {
+        glog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err)
+        return err
+    }
+    glog.V(3).Infof("Removed protection finalizer from PVC %s/%s", pvc.Namespace, pvc.Name)
+    return nil
+}
+
+func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
+    pods, err := c.podLister.Pods(pvc.Namespace).List(labels.Everything())
+    if err != nil {
+        return false, err
+    }
+    for _, pod := range pods {
+        if pod.Spec.NodeName == "" {
+            // This pod is not scheduled. We have a predicate in the scheduler that
+            // prevents scheduling pods with deletion timestamp, so we can be
+            // pretty sure it won't be scheduled in parallel to this check.
+            // Therefore this pod does not block the PVC from deletion.
+            glog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name)
+            continue
+        }
+        if volumehelper.IsPodTerminated(pod, pod.Status) {
+            // This pod is being unmounted/detached or is already
+            // unmounted/detached. It does not block the PVC from deletion.
+            continue
+        }
+        for _, volume := range pod.Spec.Volumes {
+            if volume.PersistentVolumeClaim == nil {
+                continue
+            }
+            if volume.PersistentVolumeClaim.ClaimName == pvc.Name {
+                glog.V(2).Infof("Keeping PVC %s/%s, it is used by pod %s/%s", pvc.Namespace, pvc.Name, pod.Namespace, pod.Name)
+                return true, nil
+            }
+        }
+    }
+
+    glog.V(3).Infof("PVC %s/%s is unused", pvc.Namespace, pvc.Name)
+    return false, nil
+}
+
+// pvcAddedUpdated reacts to pvc added/updated/deleted events
+func (c *Controller) pvcAddedUpdated(obj interface{}) {
+    pvc, ok := obj.(*v1.PersistentVolumeClaim)
+    if !ok {
+        utilruntime.HandleError(fmt.Errorf("PVC informer returned non-PVC object: %#v", obj))
+        return
+    }
+    key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pvc)
+    if err != nil {
+        utilruntime.HandleError(fmt.Errorf("Couldn't get key for Persistent Volume Claim %#v: %v", pvc, err))
+        return
+    }
+    glog.V(4).Infof("Got event on PVC %s", key)
+
+    if (!volumeutil.IsPVCBeingDeleted(pvc) && !volumeutil.IsProtectionFinalizerPresent(pvc)) || (volumeutil.IsPVCBeingDeleted(pvc) && volumeutil.IsProtectionFinalizerPresent(pvc)) {
+        c.queue.Add(key)
+    }
+}
+
+// podAddedDeletedUpdated reacts to Pod events
+func (c *Controller) podAddedDeletedUpdated(obj interface{}, deleted bool) {
+    pod, ok := obj.(*v1.Pod)
+    if !ok {
+        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+        if !ok {
+            utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
+            return
+        }
+        pod, ok = tombstone.Obj.(*v1.Pod)
+        if !ok {
+            utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Pod %#v", obj))
+            return
+        }
+    }
+
+    // Filter out pods that can't help us to remove a finalizer on PVC
+    if !deleted && !volumehelper.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" {
+        return
+    }
+
+    glog.V(4).Infof("Got event on pod %s/%s", pod.Namespace, pod.Name)
+
+    // Enqueue all PVCs that the pod uses
+    for _, volume := range pod.Spec.Volumes {
+        if volume.PersistentVolumeClaim != nil {
+            c.queue.Add(pod.Namespace + "/" + volume.PersistentVolumeClaim.ClaimName)
+        }
+    }
+}
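Note: the controller above keys everything off two predicates, the deletion timestamp and finalizer presence: live PVCs without the finalizer get it added, deleted PVCs with the finalizer get it removed once no pod uses them, and only PVCs in one of those two states are queued. Below is a minimal standalone sketch of that decision logic, with plain structs standing in for the API types (illustrative only, not the Kubernetes implementation).

// Minimal model of the enqueue and reconcile conditions in pvcAddedUpdated /
// processPVC above. Names and types are illustrative only.
package main

import "fmt"

type claim struct {
    beingDeleted bool
    hasFinalizer bool
}

// needsWork mirrors the enqueue condition: only claims that may need the
// finalizer added or removed are queued.
func needsWork(c claim) bool {
    return (!c.beingDeleted && !c.hasFinalizer) || (c.beingDeleted && c.hasFinalizer)
}

// reconcile mirrors processPVC: add the finalizer to live claims that lack it,
// and remove it from deleted claims once nothing uses them.
func reconcile(c claim, inUse bool) string {
    switch {
    case c.beingDeleted && c.hasFinalizer && !inUse:
        return "remove finalizer"
    case !c.beingDeleted && !c.hasFinalizer:
        return "add finalizer"
    default:
        return "no action"
    }
}

func main() {
    fmt.Println(needsWork(claim{beingDeleted: true, hasFinalizer: true}))       // true
    fmt.Println(reconcile(claim{beingDeleted: true, hasFinalizer: true}, false)) // remove finalizer
    fmt.Println(reconcile(claim{}, false))                                       // add finalizer
}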
@@ -0,0 +1,397 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pvcprotection
+
+import (
+    "errors"
+    "reflect"
+    "testing"
+    "time"
+
+    "github.com/davecgh/go-spew/spew"
+    "github.com/golang/glog"
+
+    "k8s.io/api/core/v1"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/client-go/informers"
+    "k8s.io/client-go/kubernetes/fake"
+    clienttesting "k8s.io/client-go/testing"
+    "k8s.io/kubernetes/pkg/controller"
+    volumeutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+type reaction struct {
+    verb string
+    resource string
+    reactorfn clienttesting.ReactionFunc
+}
+
+const (
+    defaultNS = "default"
+    defaultPVCName = "pvc1"
+    defaultPodName = "pod1"
+    defaultNodeName = "node1"
+)
+
+func pod() *v1.Pod {
+    return &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: defaultPodName,
+            Namespace: defaultNS,
+        },
+        Spec: v1.PodSpec{
+            NodeName: defaultNodeName,
+        },
+        Status: v1.PodStatus{
+            Phase: v1.PodPending,
+        },
+    }
+}
+
+func unscheduled(pod *v1.Pod) *v1.Pod {
+    pod.Spec.NodeName = ""
+    return pod
+}
+
+func withPVC(pvcName string, pod *v1.Pod) *v1.Pod {
+    volume := v1.Volume{
+        Name: pvcName,
+        VolumeSource: v1.VolumeSource{
+            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+                ClaimName: pvcName,
+            },
+        },
+    }
+    pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
+    return pod
+}
+
+func withEmptyDir(pod *v1.Pod) *v1.Pod {
+    volume := v1.Volume{
+        Name: "emptyDir",
+        VolumeSource: v1.VolumeSource{
+            EmptyDir: &v1.EmptyDirVolumeSource{},
+        },
+    }
+    pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
+    return pod
+}
+
+func withStatus(phase v1.PodPhase, pod *v1.Pod) *v1.Pod {
+    pod.Status.Phase = phase
+    return pod
+}
+
+func pvc() *v1.PersistentVolumeClaim {
+    return &v1.PersistentVolumeClaim{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: defaultPVCName,
+            Namespace: defaultNS,
+        },
+    }
+}
+
+func withProtectionFinalizer(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
+    pvc.Finalizers = append(pvc.Finalizers, volumeutil.PVCProtectionFinalizer)
+    return pvc
+}
+
+func deleted(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
+    pvc.DeletionTimestamp = &metav1.Time{}
+    return pvc
+}
+
+func generateUpdateErrorFunc(t *testing.T, failures int) clienttesting.ReactionFunc {
+    i := 0
+    return func(action clienttesting.Action) (bool, runtime.Object, error) {
+        i++
+        if i <= failures {
+            // Update fails
+            update, ok := action.(clienttesting.UpdateAction)
+
+            if !ok {
+                t.Fatalf("Reactor got non-update action: %+v", action)
+            }
+            acc, _ := meta.Accessor(update.GetObject())
+            return true, nil, apierrors.NewForbidden(update.GetResource().GroupResource(), acc.GetName(), errors.New("Mock error"))
+        }
+        // Update succeeds
+        return false, nil, nil
+    }
+}
+
+func TestPVCProtectionController(t *testing.T) {
+    pvcVer := schema.GroupVersionResource{
+        Group: v1.GroupName,
+        Version: "v1",
+        Resource: "persistentvolumeclaims",
+    }
+
+    tests := []struct {
+        name string
+        // Object to insert into fake kubeclient before the test starts.
+        initialObjects []runtime.Object
+        // Optional client reactors.
+        reactors []reaction
+        // PVC event to simulate. This PVC will be automatically added to
+        // initialObjects.
+        updatedPVC *v1.PersistentVolumeClaim
+        // Pod event to simulate. This Pod will be automatically added to
+        // initialObjects.
+        updatedPod *v1.Pod
+        // Pod event to simulate. This Pod is *not* added to
+        // initialObjects.
+        deletedPod *v1.Pod
+        // List of expected kubeclient actions that should happen during the
+        // test.
+        expectedActions []clienttesting.Action
+    }{
+        //
+        // PVC events
+        //
+        {
+            name: "PVC without finalizer -> finalizer is added",
+            updatedPVC: pvc(),
+            expectedActions: []clienttesting.Action{
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, withProtectionFinalizer(pvc())),
+            },
+        },
+        {
+            name: "PVC with finalizer -> no action",
+            updatedPVC: withProtectionFinalizer(pvc()),
+            expectedActions: []clienttesting.Action{},
+        },
+        {
+            name: "saving PVC finalizer fails -> controller retries",
+            updatedPVC: pvc(),
+            reactors: []reaction{
+                {
+                    verb: "update",
+                    resource: "persistentvolumeclaims",
+                    reactorfn: generateUpdateErrorFunc(t, 2 /* update fails twice*/),
+                },
+            },
+            expectedActions: []clienttesting.Action{
+                // This fails
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, withProtectionFinalizer(pvc())),
+                // This fails too
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, withProtectionFinalizer(pvc())),
+                // This succeeds
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, withProtectionFinalizer(pvc())),
+            },
+        },
+        {
+            name: "deleted PVC with finalizer -> finalizer is removed",
+            updatedPVC: deleted(withProtectionFinalizer(pvc())),
+            expectedActions: []clienttesting.Action{
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+            },
+        },
+        {
+            name: "finalizer removal fails -> controller retries",
+            updatedPVC: deleted(withProtectionFinalizer(pvc())),
+            reactors: []reaction{
+                {
+                    verb: "update",
+                    resource: "persistentvolumeclaims",
+                    reactorfn: generateUpdateErrorFunc(t, 2 /* update fails twice*/),
+                },
+            },
+            expectedActions: []clienttesting.Action{
+                // Fails
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+                // Fails too
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+                // Succeeds
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+            },
+        },
+        {
+            name: "deleted PVC with finalizer + pods with the PVC exists -> finalizer is not removed",
+            initialObjects: []runtime.Object{
+                withPVC(defaultPVCName, pod()),
+            },
+            updatedPVC: deleted(withProtectionFinalizer(pvc())),
+            expectedActions: []clienttesting.Action{},
+        },
+        {
+            name: "deleted PVC with finalizer + pods with unrelated PVC and EmptyDir exists -> finalizer is removed",
+            initialObjects: []runtime.Object{
+                withEmptyDir(withPVC("unrelatedPVC", pod())),
+            },
+            updatedPVC: deleted(withProtectionFinalizer(pvc())),
+            expectedActions: []clienttesting.Action{
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+            },
+        },
+        {
+            name: "deleted PVC with finalizer + pods with the PVC and is finished -> finalizer is removed",
+            initialObjects: []runtime.Object{
+                withStatus(v1.PodFailed, withPVC(defaultPVCName, pod())),
+            },
+            updatedPVC: deleted(withProtectionFinalizer(pvc())),
+            expectedActions: []clienttesting.Action{
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+            },
+        },
+        //
+        // Pod events
+        //
+        {
+            name: "updated running Pod -> no action",
+            initialObjects: []runtime.Object{
+                deleted(withProtectionFinalizer(pvc())),
+            },
+            updatedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())),
+            expectedActions: []clienttesting.Action{},
+        },
+        {
+            name: "updated finished Pod -> finalizer is removed",
+            initialObjects: []runtime.Object{
+                deleted(withProtectionFinalizer(pvc())),
+            },
+            updatedPod: withStatus(v1.PodSucceeded, withPVC(defaultPVCName, pod())),
+            expectedActions: []clienttesting.Action{
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+            },
+        },
+        {
+            name: "updated unscheduled Pod -> finalizer is removed",
+            initialObjects: []runtime.Object{
+                deleted(withProtectionFinalizer(pvc())),
+            },
+            updatedPod: unscheduled(withPVC(defaultPVCName, pod())),
+            expectedActions: []clienttesting.Action{
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+            },
+        },
+        {
+            name: "deleted running Pod -> finalizer is removed",
+            initialObjects: []runtime.Object{
+                deleted(withProtectionFinalizer(pvc())),
+            },
+            deletedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())),
+            expectedActions: []clienttesting.Action{
+                clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
+            },
+        },
+    }
+
+    for _, test := range tests {
+        // Create client with initial data
+        objs := test.initialObjects
+        if test.updatedPVC != nil {
+            objs = append(objs, test.updatedPVC)
+        }
+        if test.updatedPod != nil {
+            objs = append(objs, test.updatedPod)
+        }
+        client := fake.NewSimpleClientset(objs...)
+
+        // Create informers
+        informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
+        pvcInformer := informers.Core().V1().PersistentVolumeClaims()
+        podInformer := informers.Core().V1().Pods()
+
+        // Populate the informers with initial objects so the controller can
+        // Get() and List() it.
+        for _, obj := range objs {
+            switch obj.(type) {
+            case *v1.PersistentVolumeClaim:
+                pvcInformer.Informer().GetStore().Add(obj)
+            case *v1.Pod:
+                podInformer.Informer().GetStore().Add(obj)
+            default:
+                t.Fatalf("Unknown initialObject type: %+v", obj)
+            }
+        }
+
+        // Add reactor to inject test errors.
+        for _, reactor := range test.reactors {
+            client.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorfn)
+        }
+
+        // Create the controller
+        ctrl := NewPVCProtectionController(pvcInformer, podInformer, client)
+
+        // Start the test by simulating an event
+        if test.updatedPVC != nil {
+            ctrl.pvcAddedUpdated(test.updatedPVC)
+        }
+        if test.updatedPod != nil {
+            ctrl.podAddedDeletedUpdated(test.updatedPod, false)
+        }
+        if test.deletedPod != nil {
+            ctrl.podAddedDeletedUpdated(test.deletedPod, true)
+        }
+
+        // Process the controller queue until we get expected results
+        timeout := time.Now().Add(10 * time.Second)
+        lastReportedActionCount := 0
+        for {
+            if time.Now().After(timeout) {
+                t.Errorf("Test %q: timed out", test.name)
+                break
+            }
+            if ctrl.queue.Len() > 0 {
+                glog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len())
+                ctrl.processNextWorkItem()
+            }
+            if ctrl.queue.Len() > 0 {
+                // There is still some work in the queue, process it now
+                continue
+            }
+            currentActionCount := len(client.Actions())
+            if currentActionCount < len(test.expectedActions) {
+                // Do not log every wait, only when the action count changes.
+                if lastReportedActionCount < currentActionCount {
+                    glog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions))
+                    lastReportedActionCount = currentActionCount
+                }
+                // The test expected more to happen, wait for the actions.
+                // Most probably it's exponential backoff
+                time.Sleep(10 * time.Millisecond)
+                continue
+            }
+            break
+        }
+        actions := client.Actions()
+        for i, action := range actions {
+            if len(test.expectedActions) < i+1 {
+                t.Errorf("Test %q: %d unexpected actions: %+v", test.name, len(actions)-len(test.expectedActions), spew.Sdump(actions[i:]))
+                break
+            }
+
+            expectedAction := test.expectedActions[i]
+            if !reflect.DeepEqual(expectedAction, action) {
+                t.Errorf("Test %q: action %d\nExpected:\n%s\ngot:\n%s", test.name, i, spew.Sdump(expectedAction), spew.Sdump(action))
+            }
+        }
+
+        if len(test.expectedActions) > len(actions) {
+            t.Errorf("Test %q: %d additional expected actions", test.name, len(test.expectedActions)-len(actions))
+            for _, a := range test.expectedActions[len(actions):] {
+                t.Logf(" %+v", a)
+            }
+        }
+
+    }
+}
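Note: the test above drains the controller queue and then polls the fake client's recorded actions with a timeout, so that rate-limited retries have time to land before the assertions run. A generic version of that wait loop is sketched below; it is an assumed helper for illustration, not a Kubernetes API.

// Generic poll-until-condition helper, modeled on the wait loop in the test.
// Illustrative only.
package main

import (
    "errors"
    "fmt"
    "time"
)

// waitFor polls cond every interval until it returns true or timeout elapses.
func waitFor(cond func() bool, interval, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)
    for {
        if cond() {
            return nil
        }
        if time.Now().After(deadline) {
            return errors.New("timed out waiting for condition")
        }
        time.Sleep(interval)
    }
}

func main() {
    ready := time.Now().Add(30 * time.Millisecond)
    err := waitFor(func() bool { return time.Now().After(ready) }, 10*time.Millisecond, time.Second)
    fmt.Println("err:", err)
}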
@@ -200,6 +200,12 @@ const (
     //
     // Enable Block volume support in containers.
     BlockVolume utilfeature.Feature = "BlockVolume"
+
+    // owner: @pospispa
+    //
+    // alpha: v1.9
+    // Postpone deletion of a persistent volume claim in case it is used by a pod
+    PVCProtection utilfeature.Feature = "PVCProtection"
 )

 func init() {
@@ -237,6 +243,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
     CSIPersistentVolume: {Default: false, PreRelease: utilfeature.Alpha},
     CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha},
     BlockVolume: {Default: false, PreRelease: utilfeature.Alpha},
+    PVCProtection: {Default: false, PreRelease: utilfeature.Alpha},

     // inherited features from generic apiserver, relisted here to get a conflict if it is changed
     // unintentionally on either side:
@@ -76,20 +76,9 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
     if err != nil {
         return nil, fmt.Errorf("failed to get imageFs info: %v", err)
     }
-    infos, err := p.cadvisor.ContainerInfoV2("/", cadvisorapiv2.RequestOptions{
-        IdType: cadvisorapiv2.TypeName,
-        Count: 2, // 2 samples are needed to compute "instantaneous" CPU
-        Recursive: true,
-    })
+    infos, err := getCadvisorContainerInfo(p.cadvisor)
     if err != nil {
-        if _, ok := infos["/"]; ok {
-            // If the failure is partial, log it and return a best-effort
-            // response.
-            glog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err)
-        } else {
-            return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
-        }
+        return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err)
     }
     // removeTerminatedContainerInfo will also remove pod level cgroups, so save the infos into allInfos first
     allInfos := infos
@@ -352,3 +341,21 @@ func hasMemoryAndCPUInstUsage(info *cadvisorapiv2.ContainerInfo) bool {
     }
     return cstat.CpuInst.Usage.Total != 0 && cstat.Memory.RSS != 0
 }
+
+func getCadvisorContainerInfo(ca cadvisor.Interface) (map[string]cadvisorapiv2.ContainerInfo, error) {
+    infos, err := ca.ContainerInfoV2("/", cadvisorapiv2.RequestOptions{
+        IdType: cadvisorapiv2.TypeName,
+        Count: 2, // 2 samples are needed to compute "instantaneous" CPU
+        Recursive: true,
+    })
+    if err != nil {
+        if _, ok := infos["/"]; ok {
+            // If the failure is partial, log it and return a best-effort
+            // response.
+            glog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err)
+        } else {
+            return nil, fmt.Errorf("failed to get root cgroup stats: %v", err)
+        }
+    }
+    return infos, nil
+}
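Note: getCadvisorContainerInfo above treats an error as fatal only when even the root cgroup ("/") entry is missing; otherwise it logs the partial failure and still returns best-effort data. A standalone sketch of that pattern follows; the names and types are illustrative, not the cadvisor API.

// Best-effort error handling: succeed with partial data when the required
// root entry is present, fail otherwise. Illustrative only.
package main

import (
    "errors"
    "fmt"
)

func bestEffort(infos map[string]string, err error) (map[string]string, error) {
    if err != nil {
        if _, ok := infos["/"]; ok {
            // Partial failure: keep what we have and report success.
            fmt.Println("partial failure:", err)
            return infos, nil
        }
        return nil, fmt.Errorf("no root stats: %v", err)
    }
    return infos, nil
}

func main() {
    out, err := bestEffort(map[string]string{"/": "root-stats"}, errors.New("one container failed"))
    fmt.Println(out, err)
}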
@@ -18,7 +18,9 @@ package stats
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"path"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
@@ -112,6 +114,11 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
|
|||||||
containerMap[c.Id] = c
|
containerMap[c.Id] = c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
caInfos, err := getCRICadvisorStats(p.cadvisor)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
for _, stats := range resp {
|
for _, stats := range resp {
|
||||||
containerID := stats.Attributes.Id
|
containerID := stats.Attributes.Id
|
||||||
container, found := containerMap[containerID]
|
container, found := containerMap[containerID]
|
||||||
@@ -132,10 +139,25 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) {
|
|||||||
ps, found := sandboxIDToPodStats[podSandboxID]
|
ps, found := sandboxIDToPodStats[podSandboxID]
|
||||||
if !found {
|
if !found {
|
||||||
ps = buildPodStats(podSandbox)
|
ps = buildPodStats(podSandbox)
|
||||||
|
// Fill stats from cadvisor is available for full set of required pod stats
|
||||||
|
caPodSandbox, found := caInfos[podSandboxID]
|
||||||
+		if !found {
+			glog.V(4).Info("Unable to find cadvisor stats for sandbox %q", podSandboxID)
+		} else {
+			p.addCadvisorPodStats(ps, &caPodSandbox)
+		}
 		sandboxIDToPodStats[podSandboxID] = ps
 	}
-		containerStats := p.makeContainerStats(stats, container, &rootFsInfo, uuidToFsInfo)
-		ps.Containers = append(ps.Containers, *containerStats)
+		cs := p.makeContainerStats(stats, container, &rootFsInfo, uuidToFsInfo)
+		// If cadvisor stats is available for the container, use it to populate
+		// container stats
+		caStats, caFound := caInfos[containerID]
+		if !caFound {
+			glog.V(4).Info("Unable to find cadvisor stats for %q", containerID)
+		} else {
+			p.addCadvisorContainerStats(cs, &caStats)
+		}
+		ps.Containers = append(ps.Containers, *cs)
 	}

 	result := make([]statsapi.PodStats, 0, len(sandboxIDToPodStats))
@@ -201,7 +223,7 @@ func (p *criStatsProvider) getFsInfo(storageID *runtimeapi.StorageIdentifier) *c
 	return &fsInfo
 }

-// buildPodRef returns a PodStats that identifies the Pod managing cinfo
+// buildPodStats returns a PodStats that identifies the Pod managing cinfo
 func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats {
 	return &statsapi.PodStats{
 		PodRef: statsapi.PodReference{
@@ -211,7 +233,6 @@ func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats {
 		},
 		// The StartTime in the summary API is the pod creation time.
 		StartTime: metav1.NewTime(time.Unix(0, podSandbox.CreatedAt)),
-		// Network stats are not supported by CRI.
 	}
 }

@@ -226,6 +247,13 @@ func (p *criStatsProvider) makePodStorageStats(s *statsapi.PodStats, rootFsInfo
 	return s
 }

+func (p *criStatsProvider) addCadvisorPodStats(
+	ps *statsapi.PodStats,
+	caPodSandbox *cadvisorapiv2.ContainerInfo,
+) {
+	ps.Network = cadvisorInfoToNetworkStats(ps.PodRef.Name, caPodSandbox)
+}
+
 func (p *criStatsProvider) makeContainerStats(
 	stats *runtimeapi.ContainerStats,
 	container *runtimeapi.Container,
@@ -336,3 +364,44 @@ func removeTerminatedContainer(containers []*runtimeapi.Container) []*runtimeapi
 	}
 	return result
 }
+
+func (p *criStatsProvider) addCadvisorContainerStats(
+	cs *statsapi.ContainerStats,
+	caPodStats *cadvisorapiv2.ContainerInfo,
+) {
+	if caPodStats.Spec.HasCustomMetrics {
+		cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(caPodStats)
+	}
+
+	cpu, memory := cadvisorInfoToCPUandMemoryStats(caPodStats)
+	if cpu != nil {
+		cs.CPU = cpu
+	}
+	if memory != nil {
+		cs.Memory = memory
+	}
+}
+
+func getCRICadvisorStats(ca cadvisor.Interface) (map[string]cadvisorapiv2.ContainerInfo, error) {
+	stats := make(map[string]cadvisorapiv2.ContainerInfo)
+	infos, err := getCadvisorContainerInfo(ca)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch cadvisor stats: %v", err)
+	}
+	infos = removeTerminatedContainerInfo(infos)
+	for key, info := range infos {
+		// On systemd using devicemapper each mount into the container has an
+		// associated cgroup. We ignore them to ensure we do not get duplicate
+		// entries in our summary. For details on .mount units:
+		// http://man7.org/linux/man-pages/man5/systemd.mount.5.html
+		if strings.HasSuffix(key, ".mount") {
+			continue
+		}
+		// Build the Pod key if this container is managed by a Pod
+		if !isPodManagedContainer(&info) {
+			continue
+		}
+		stats[path.Base(key)] = info
+	}
+	return stats, nil
+}
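The cgroup-key filtering in getCRICadvisorStats above is easier to see with concrete inputs. The following self-contained Go sketch reproduces just that filtering step; the sample cgroup paths are made up for illustration, and the real function additionally drops containers that are not pod-managed.

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// Hypothetical cadvisor keys: a container cgroup and a systemd .mount
	// cgroup created for one of that container's mounts.
	keys := []string{
		"/kubepods/burstable/pod-x/abc123",
		"/kubepods/burstable/pod-x/abc123/var-lib-docker.mount",
	}
	for _, key := range keys {
		// Skip .mount units so the same container is not counted twice.
		if strings.HasSuffix(key, ".mount") {
			continue
		}
		// The stats map is keyed by the container ID, i.e. the last path element.
		fmt.Println(path.Base(key)) // prints: abc123
	}
}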
@@ -22,20 +22,47 @@ import (
 	"time"

 	cadvisorfs "github.com/google/cadvisor/fs"
-	"github.com/stretchr/testify/assert"

 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
+	"github.com/stretchr/testify/assert"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	critest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
 	kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
+	"k8s.io/kubernetes/pkg/kubelet/leaky"
 	kubepodtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
 	serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
 )

 func TestCRIListPodStats(t *testing.T) {
+	const (
+		seedRoot       = 0
+		seedRuntime    = 100
+		seedKubelet    = 200
+		seedMisc       = 300
+		seedSandbox0   = 1000
+		seedContainer0 = 2000
+		seedSandbox1   = 3000
+		seedContainer1 = 4000
+		seedContainer2 = 5000
+		seedSandbox2   = 6000
+		seedContainer3 = 7000
+	)
+
+	const (
+		pName0 = "pod0"
+		pName1 = "pod1"
+		pName2 = "pod2"
+	)
+
+	const (
+		cName0 = "container0-name"
+		cName1 = "container1-name"
+		cName2 = "container2-name"
+		cName3 = "container3-name"
+	)
+
 	var (
 		imageFsStorageUUID = "imagefs-storage-uuid"
 		unknownStorageUUID = "unknown-storage-uuid"
@@ -43,19 +70,19 @@ func TestCRIListPodStats(t *testing.T) {
 		rootFsInfo = getTestFsInfo(1000)

 		sandbox0 = makeFakePodSandbox("sandbox0-name", "sandbox0-uid", "sandbox0-ns")
-		container0 = makeFakeContainer(sandbox0, "container0-name", 0, false)
+		container0 = makeFakeContainer(sandbox0, cName0, 0, false)
 		containerStats0 = makeFakeContainerStats(container0, imageFsStorageUUID)
-		container1 = makeFakeContainer(sandbox0, "container1-name", 0, false)
+		container1 = makeFakeContainer(sandbox0, cName1, 0, false)
 		containerStats1 = makeFakeContainerStats(container1, unknownStorageUUID)

 		sandbox1 = makeFakePodSandbox("sandbox1-name", "sandbox1-uid", "sandbox1-ns")
-		container2 = makeFakeContainer(sandbox1, "container2-name", 0, false)
+		container2 = makeFakeContainer(sandbox1, cName2, 0, false)
 		containerStats2 = makeFakeContainerStats(container2, imageFsStorageUUID)

 		sandbox2 = makeFakePodSandbox("sandbox2-name", "sandbox2-uid", "sandbox2-ns")
-		container3 = makeFakeContainer(sandbox2, "container3-name", 0, true)
+		container3 = makeFakeContainer(sandbox2, cName3, 0, true)
 		containerStats3 = makeFakeContainerStats(container3, imageFsStorageUUID)
-		container4 = makeFakeContainer(sandbox2, "container3-name", 1, false)
+		container4 = makeFakeContainer(sandbox2, cName3, 1, false)
 		containerStats4 = makeFakeContainerStats(container4, imageFsStorageUUID)
 	)

@@ -68,7 +95,27 @@ func TestCRIListPodStats(t *testing.T) {
 		fakeImageService = critest.NewFakeImageService()
 	)

+	infos := map[string]cadvisorapiv2.ContainerInfo{
+		"/":        getTestContainerInfo(seedRoot, "", "", ""),
+		"/kubelet": getTestContainerInfo(seedKubelet, "", "", ""),
+		"/system":  getTestContainerInfo(seedMisc, "", "", ""),
+		sandbox0.PodSandboxStatus.Id:  getTestContainerInfo(seedSandbox0, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
+		container0.ContainerStatus.Id: getTestContainerInfo(seedContainer0, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, cName0),
+		container1.ContainerStatus.Id: getTestContainerInfo(seedContainer1, pName0, sandbox0.PodSandboxStatus.Metadata.Namespace, cName1),
+		sandbox1.PodSandboxStatus.Id:  getTestContainerInfo(seedSandbox1, pName1, sandbox1.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
+		container2.ContainerStatus.Id: getTestContainerInfo(seedContainer2, pName1, sandbox1.PodSandboxStatus.Metadata.Namespace, cName2),
+		sandbox2.PodSandboxStatus.Id:  getTestContainerInfo(seedSandbox2, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName),
+		container4.ContainerStatus.Id: getTestContainerInfo(seedContainer3, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, cName3),
+	}
+
+	options := cadvisorapiv2.RequestOptions{
+		IdType:    cadvisorapiv2.TypeName,
+		Count:     2,
+		Recursive: true,
+	}
+
 	mockCadvisor.
+		On("ContainerInfoV2", "/", options).Return(infos, nil).
 		On("RootFsInfo").Return(rootFsInfo, nil).
 		On("GetFsInfoByFsUUID", imageFsStorageUUID).Return(imageFsInfo, nil).
 		On("GetFsInfoByFsUUID", unknownStorageUUID).Return(cadvisorapiv2.FsInfo{}, cadvisorfs.ErrNoSuchDevice)
@@ -117,16 +164,18 @@ func TestCRIListPodStats(t *testing.T) {
 	for _, s := range p0.Containers {
 		containerStatsMap[s.Name] = s
 	}
-	c0 := containerStatsMap["container0-name"]
+	c0 := containerStatsMap[cName0]
 	assert.Equal(container0.CreatedAt, c0.StartTime.UnixNano())
-	checkCRICPUAndMemoryStats(assert, c0, containerStats0)
+	checkCRICPUAndMemoryStats(assert, c0, infos[container0.ContainerStatus.Id].Stats[0])
 	checkCRIRootfsStats(assert, c0, containerStats0, &imageFsInfo)
 	checkCRILogsStats(assert, c0, &rootFsInfo)
-	c1 := containerStatsMap["container1-name"]
+	c1 := containerStatsMap[cName1]
 	assert.Equal(container1.CreatedAt, c1.StartTime.UnixNano())
-	checkCRICPUAndMemoryStats(assert, c1, containerStats1)
+	checkCRICPUAndMemoryStats(assert, c1, infos[container1.ContainerStatus.Id].Stats[0])
 	checkCRIRootfsStats(assert, c1, containerStats1, nil)
 	checkCRILogsStats(assert, c1, &rootFsInfo)
+	checkCRINetworkStats(assert, p0.Network, infos[sandbox0.PodSandboxStatus.Id].Stats[0].Network)

 	p1 := podStatsMap[statsapi.PodReference{Name: "sandbox1-name", UID: "sandbox1-uid", Namespace: "sandbox1-ns"}]
 	assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
@@ -134,11 +183,12 @@ func TestCRIListPodStats(t *testing.T) {

 	checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2})
 	c2 := p1.Containers[0]
-	assert.Equal("container2-name", c2.Name)
+	assert.Equal(cName2, c2.Name)
 	assert.Equal(container2.CreatedAt, c2.StartTime.UnixNano())
-	checkCRICPUAndMemoryStats(assert, c2, containerStats2)
+	checkCRICPUAndMemoryStats(assert, c2, infos[container2.ContainerStatus.Id].Stats[0])
 	checkCRIRootfsStats(assert, c2, containerStats2, &imageFsInfo)
 	checkCRILogsStats(assert, c2, &rootFsInfo)
+	checkCRINetworkStats(assert, p1.Network, infos[sandbox1.PodSandboxStatus.Id].Stats[0].Network)

 	p2 := podStatsMap[statsapi.PodReference{Name: "sandbox2-name", UID: "sandbox2-uid", Namespace: "sandbox2-ns"}]
 	assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano())
@@ -147,12 +197,13 @@ func TestCRIListPodStats(t *testing.T) {
 	checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats4})

 	c3 := p2.Containers[0]
-	assert.Equal("container3-name", c3.Name)
+	assert.Equal(cName3, c3.Name)
 	assert.Equal(container4.CreatedAt, c3.StartTime.UnixNano())
-	checkCRICPUAndMemoryStats(assert, c3, containerStats4)
+	checkCRICPUAndMemoryStats(assert, c3, infos[container4.ContainerStatus.Id].Stats[0])
 	checkCRIRootfsStats(assert, c3, containerStats4, &imageFsInfo)

 	checkCRILogsStats(assert, c3, &rootFsInfo)
+	checkCRINetworkStats(assert, p2.Network, infos[sandbox2.PodSandboxStatus.Id].Stats[0].Network)

 	mockCadvisor.AssertExpectations(t)
 }
@@ -306,18 +357,16 @@ func makeFakeVolumeStats(volumeNames []string) []statsapi.VolumeStats {
 	return volumes
 }

-func checkCRICPUAndMemoryStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *runtimeapi.ContainerStats) {
-	assert.Equal(cs.Cpu.Timestamp, actual.CPU.Time.UnixNano())
-	assert.Equal(cs.Cpu.UsageCoreNanoSeconds.Value, *actual.CPU.UsageCoreNanoSeconds)
-	assert.Zero(*actual.CPU.UsageNanoCores)
-
-	assert.Equal(cs.Memory.Timestamp, actual.Memory.Time.UnixNano())
-	assert.Nil(actual.Memory.AvailableBytes)
-	assert.Nil(actual.Memory.UsageBytes)
-	assert.Equal(cs.Memory.WorkingSetBytes.Value, *actual.Memory.WorkingSetBytes)
-	assert.Zero(*actual.Memory.RSSBytes)
-	assert.Nil(actual.Memory.PageFaults)
-	assert.Nil(actual.Memory.MajorPageFaults)
+func checkCRICPUAndMemoryStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *cadvisorapiv2.ContainerStats) {
+	assert.Equal(cs.Timestamp.UnixNano(), actual.CPU.Time.UnixNano())
+	assert.Equal(cs.Cpu.Usage.Total, *actual.CPU.UsageCoreNanoSeconds)
+	assert.Equal(cs.CpuInst.Usage.Total, *actual.CPU.UsageNanoCores)
+
+	assert.Equal(cs.Memory.Usage, *actual.Memory.UsageBytes)
+	assert.Equal(cs.Memory.WorkingSet, *actual.Memory.WorkingSetBytes)
+	assert.Equal(cs.Memory.RSS, *actual.Memory.RSSBytes)
+	assert.Equal(cs.Memory.ContainerData.Pgfault, *actual.Memory.PageFaults)
+	assert.Equal(cs.Memory.ContainerData.Pgmajfault, *actual.Memory.MajorPageFaults)
 }

 func checkCRIRootfsStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *runtimeapi.ContainerStats, imageFsInfo *cadvisorapiv2.FsInfo) {
@@ -361,3 +410,10 @@ func checkEphemeralStorageStats(assert *assert.Assertions, actual statsapi.PodSt
 	assert.Equal(int(*actual.EphemeralStorage.UsedBytes), int(totalUsed))
 	assert.Equal(int(*actual.EphemeralStorage.InodesUsed), int(inodesUsed))
 }
+
+func checkCRINetworkStats(assert *assert.Assertions, actual *statsapi.NetworkStats, expected *cadvisorapiv2.NetworkStats) {
+	assert.Equal(expected.Interfaces[0].RxBytes, *actual.RxBytes)
+	assert.Equal(expected.Interfaces[0].RxErrors, *actual.RxErrors)
+	assert.Equal(expected.Interfaces[0].TxBytes, *actual.TxBytes)
+	assert.Equal(expected.Interfaces[0].TxErrors, *actual.TxErrors)
+}
@@ -86,29 +86,12 @@ func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo

 	if rootFs != nil {
 		// The container logs live on the node rootfs device
-		result.Logs = &statsapi.FsStats{
-			Time:           metav1.NewTime(cstat.Timestamp),
-			AvailableBytes: &rootFs.Available,
-			CapacityBytes:  &rootFs.Capacity,
-			InodesFree:     rootFs.InodesFree,
-			Inodes:         rootFs.Inodes,
-		}
-
-		if rootFs.Inodes != nil && rootFs.InodesFree != nil {
-			logsInodesUsed := *rootFs.Inodes - *rootFs.InodesFree
-			result.Logs.InodesUsed = &logsInodesUsed
-		}
+		result.Logs = buildLogsStats(cstat, rootFs)
 	}

 	if imageFs != nil {
 		// The container rootFs lives on the imageFs devices (which may not be the node root fs)
-		result.Rootfs = &statsapi.FsStats{
-			Time:           metav1.NewTime(cstat.Timestamp),
-			AvailableBytes: &imageFs.Available,
-			CapacityBytes:  &imageFs.Capacity,
-			InodesFree:     imageFs.InodesFree,
-			Inodes:         imageFs.Inodes,
-		}
+		result.Rootfs = buildRootfsStats(cstat, imageFs)
 	}

 	cfs := cstat.Filesystem
@@ -274,3 +257,29 @@ func getCgroupStats(cadvisor cadvisor.Interface, containerName string) (*cadviso
 	}
 	return stats, nil
 }
+
+func buildLogsStats(cstat *cadvisorapiv2.ContainerStats, rootFs *cadvisorapiv2.FsInfo) *statsapi.FsStats {
+	fsStats := &statsapi.FsStats{
+		Time:           metav1.NewTime(cstat.Timestamp),
+		AvailableBytes: &rootFs.Available,
+		CapacityBytes:  &rootFs.Capacity,
+		InodesFree:     rootFs.InodesFree,
+		Inodes:         rootFs.Inodes,
+	}
+
+	if rootFs.Inodes != nil && rootFs.InodesFree != nil {
+		logsInodesUsed := *rootFs.Inodes - *rootFs.InodesFree
+		fsStats.InodesUsed = &logsInodesUsed
+	}
+	return fsStats
+}
+
+func buildRootfsStats(cstat *cadvisorapiv2.ContainerStats, imageFs *cadvisorapiv2.FsInfo) *statsapi.FsStats {
+	return &statsapi.FsStats{
+		Time:           metav1.NewTime(cstat.Timestamp),
+		AvailableBytes: &imageFs.Available,
+		CapacityBytes:  &imageFs.Capacity,
+		InodesFree:     imageFs.InodesFree,
+		Inodes:         imageFs.Inodes,
+	}
+}
@@ -19,6 +19,7 @@ go_library(
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/kubelet/volumemanager/cache:go_default_library",
         "//pkg/volume:go_default_library",
+        "//pkg/volume/util:go_default_library",
         "//pkg/volume/util/types:go_default_library",
         "//pkg/volume/util/volumehelper:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
@@ -41,6 +41,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
 	"k8s.io/kubernetes/pkg/volume"
+	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
 	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )
@@ -419,7 +420,8 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 }

 // getPVCExtractPV fetches the PVC object with the given namespace and name from
-// the API server extracts the name of the PV it is pointing to and returns it.
+// the API server, checks whether PVC is being deleted, extracts the name of the PV
+// it is pointing to and returns it.
 // An error is returned if the PVC object's phase is not "Bound".
 func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
 	namespace string, claimName string) (string, types.UID, error) {
@@ -433,6 +435,23 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
 			err)
 	}

+	if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) {
+		// Pods that uses a PVC that is being deleted must not be started.
+		//
+		// In case an old kubelet is running without this check or some kubelets
+		// have this feature disabled, the worst that can happen is that such
+		// pod is scheduled. This was the default behavior in 1.8 and earlier
+		// and users should not be that surprised.
+		// It should happen only in very rare case when scheduler schedules
+		// a pod and user deletes a PVC that's used by it at the same time.
+		if volumeutil.IsPVCBeingDeleted(pvc) {
+			return "", "", fmt.Errorf(
+				"can't start pod because PVC %s/%s is being deleted",
+				namespace,
+				claimName)
+		}
+	}
+
 	if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {

 		return "", "", fmt.Errorf(
@@ -3386,6 +3386,9 @@ func describePodSecurityPolicy(psp *extensions.PodSecurityPolicy) (string, error
 		w.Write(LEVEL_1, "Allowed Capabilities:\t%s\n", capsToString(psp.Spec.AllowedCapabilities))
 		w.Write(LEVEL_1, "Allowed Volume Types:\t%s\n", fsTypeToString(psp.Spec.Volumes))

+		if len(psp.Spec.AllowedFlexVolumes) > 0 {
+			w.Write(LEVEL_1, "Allowed FlexVolume Types:\t%s\n", flexVolumesToString(psp.Spec.AllowedFlexVolumes))
+		}
 		w.Write(LEVEL_1, "Allow Host Network:\t%t\n", psp.Spec.HostNetwork)
 		w.Write(LEVEL_1, "Allow Host Ports:\t%s\n", hostPortRangeToString(psp.Spec.HostPorts))
 		w.Write(LEVEL_1, "Allow Host PID:\t%t\n", psp.Spec.HostPID)
@@ -3419,10 +3422,14 @@ func describePodSecurityPolicy(psp *extensions.PodSecurityPolicy) (string, error
 }

 func stringOrNone(s string) string {
+	return stringOrDefaultValue(s, "<none>")
+}
+
+func stringOrDefaultValue(s, defaultValue string) string {
 	if len(s) > 0 {
 		return s
 	}
-	return "<none>"
+	return defaultValue
 }

 func fsTypeToString(volumes []extensions.FSType) string {
@@ -3433,6 +3440,14 @@ func fsTypeToString(volumes []extensions.FSType) string {
 	return stringOrNone(strings.Join(strVolumes, ","))
 }

+func flexVolumesToString(flexVolumes []extensions.AllowedFlexVolume) string {
+	volumes := []string{}
+	for _, flexVolume := range flexVolumes {
+		volumes = append(volumes, "driver="+flexVolume.Driver)
+	}
+	return stringOrDefaultValue(strings.Join(volumes, ","), "<all>")
+}
+
 func hostPortRangeToString(ranges []extensions.HostPortRange) string {
 	formattedString := ""
 	if ranges != nil {
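For a concrete picture of the output produced by the flexVolumesToString helper added above, here is a small self-contained Go sketch. It re-declares the same logic locally so it runs on its own; the AllowedFlexVolume type below is a stand-in for extensions.AllowedFlexVolume, reduced to the one field the helper reads.

package main

import (
	"fmt"
	"strings"
)

// AllowedFlexVolume mirrors the Driver field of extensions.AllowedFlexVolume.
type AllowedFlexVolume struct {
	Driver string
}

// Same formatting logic as the flexVolumesToString helper in the hunk above:
// a comma-separated driver list, or "<all>" when the whitelist is empty.
func flexVolumesToString(flexVolumes []AllowedFlexVolume) string {
	volumes := []string{}
	for _, flexVolume := range flexVolumes {
		volumes = append(volumes, "driver="+flexVolume.Driver)
	}
	if s := strings.Join(volumes, ","); len(s) > 0 {
		return s
	}
	return "<all>"
}

func main() {
	fmt.Println(flexVolumesToString([]AllowedFlexVolume{{Driver: "example/foo"}, {Driver: "example/bar"}}))
	// prints: driver=example/foo,driver=example/bar
	fmt.Println(flexVolumesToString(nil))
	// prints: <all>
}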
@@ -343,6 +343,7 @@ func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clus
 		}
 		glog.V(1).Infof("migrating %v to %v", existingRole.Name, newName)
 		existingRole.Name = newName
+		existingRole.ResourceVersion = "" // clear this so the object can be created.
 		if _, err := clusterRoleClient.ClusterRoles().Create(existingRole); err != nil && !apierrors.IsAlreadyExists(err) {
 			return err
 		}
@@ -233,9 +233,24 @@ func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field
 					fmt.Sprintf("is not allowed to be used")))
 			}
 		}

+		if fsType == extensions.FlexVolume && len(s.psp.Spec.AllowedFlexVolumes) > 0 {
+			found := false
+			driver := v.FlexVolume.Driver
+			for _, allowedFlexVolume := range s.psp.Spec.AllowedFlexVolumes {
+				if driver == allowedFlexVolume.Driver {
+					found = true
+					break
+				}
+			}
+			if !found {
+				allErrs = append(allErrs,
+					field.Invalid(fldPath.Child("volumes").Index(i).Child("driver"), driver,
+						"Flexvolume driver is not allowed to be used"))
+			}
+		}
 	}
 }

 	return allErrs
 }
@@ -256,6 +256,18 @@ func TestValidatePodSecurityContextFailures(t *testing.T) {
 	failSeccompProfilePod := defaultPod()
 	failSeccompProfilePod.Annotations = map[string]string{api.SeccompPodAnnotationKey: "foo"}

+	podWithInvalidFlexVolumeDriver := defaultPod()
+	podWithInvalidFlexVolumeDriver.Spec.Volumes = []api.Volume{
+		{
+			Name: "flex-volume",
+			VolumeSource: api.VolumeSource{
+				FlexVolume: &api.FlexVolumeSource{
+					Driver: "example/unknown",
+				},
+			},
+		},
+	}
+
 	errorCases := map[string]struct {
 		pod *api.Pod
 		psp *extensions.PodSecurityPolicy
@@ -341,6 +353,16 @@ func TestValidatePodSecurityContextFailures(t *testing.T) {
 			psp: defaultPSP(),
 			expectedError: "Forbidden: seccomp may not be set",
 		},
+		"fail pod with disallowed flexVolume when flex volumes are allowed": {
+			pod: podWithInvalidFlexVolumeDriver,
+			psp: allowFlexVolumesPSP(false, false),
+			expectedError: "Flexvolume driver is not allowed to be used",
+		},
+		"fail pod with disallowed flexVolume when all volumes are allowed": {
+			pod: podWithInvalidFlexVolumeDriver,
+			psp: allowFlexVolumesPSP(false, true),
+			expectedError: "Flexvolume driver is not allowed to be used",
+		},
 	}
 	for k, v := range errorCases {
 		provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory())
@@ -358,6 +380,28 @@ func TestValidatePodSecurityContextFailures(t *testing.T) {
 	}
 }

+func allowFlexVolumesPSP(allowAllFlexVolumes, allowAllVolumes bool) *extensions.PodSecurityPolicy {
+	psp := defaultPSP()
+
+	allowedVolumes := []extensions.AllowedFlexVolume{
+		{Driver: "example/foo"},
+		{Driver: "example/bar"},
+	}
+	if allowAllFlexVolumes {
+		allowedVolumes = []extensions.AllowedFlexVolume{}
+	}
+
+	allowedVolumeType := extensions.FlexVolume
+	if allowAllVolumes {
+		allowedVolumeType = extensions.All
+	}
+
+	psp.Spec.AllowedFlexVolumes = allowedVolumes
+	psp.Spec.Volumes = []extensions.FSType{allowedVolumeType}
+
+	return psp
+}
+
 func TestValidateContainerSecurityContextFailures(t *testing.T) {
 	// fail user strat
 	failUserPSP := defaultPSP()
@@ -597,6 +641,18 @@ func TestValidatePodSecurityContextSuccess(t *testing.T) {
 		api.SeccompPodAnnotationKey: "foo",
 	}

+	flexVolumePod := defaultPod()
+	flexVolumePod.Spec.Volumes = []api.Volume{
+		{
+			Name: "flex-volume",
+			VolumeSource: api.VolumeSource{
+				FlexVolume: &api.FlexVolumeSource{
+					Driver: "example/bar",
+				},
+			},
+		},
+	}
+
 	successCases := map[string]struct {
 		pod *api.Pod
 		psp *extensions.PodSecurityPolicy
@@ -653,6 +709,22 @@ func TestValidatePodSecurityContextSuccess(t *testing.T) {
 			pod: seccompPod,
 			psp: seccompPSP,
 		},
+		"flex volume driver in a whitelist (all volumes are allowed)": {
+			pod: flexVolumePod,
+			psp: allowFlexVolumesPSP(false, true),
+		},
+		"flex volume driver with empty whitelist (all volumes are allowed)": {
+			pod: flexVolumePod,
+			psp: allowFlexVolumesPSP(true, true),
+		},
+		"flex volume driver in a whitelist (only flex volumes are allowed)": {
+			pod: flexVolumePod,
+			psp: allowFlexVolumesPSP(false, false),
+		},
+		"flex volume driver with empty whitelist (only flex volumes volumes are allowed)": {
+			pod: flexVolumePod,
+			psp: allowFlexVolumesPSP(true, false),
+		},
 	}

 	for k, v := range successCases {
@@ -46,6 +46,7 @@ go_test(
         "//pkg/volume/testing:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
@@ -26,6 +26,7 @@ import (
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"

 	"github.com/golang/glog"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/types"
 )

@@ -340,3 +341,10 @@ func (testcase *testcase) GetVolumeLabels(volumeName aws.KubernetesVolumeID) (ma
 func (testcase *testcase) GetDiskPath(volumeName aws.KubernetesVolumeID) (string, error) {
 	return "", errors.New("Not implemented")
 }
+
+func (testcase *testcase) ResizeDisk(
+	volumeName aws.KubernetesVolumeID,
+	oldSize resource.Quantity,
+	newSize resource.Quantity) (resource.Quantity, error) {
+	return oldSize, errors.New("Not implemented")
+}
@@ -241,6 +241,33 @@ func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath
 	return volume.NewSpecFromVolume(awsVolume), nil
 }

+func (plugin *awsElasticBlockStorePlugin) RequiresFSResize() bool {
+	return true
+}
+
+func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice(
+	spec *volume.Spec,
+	newSize resource.Quantity,
+	oldSize resource.Quantity) (resource.Quantity, error) {
+	var awsVolume aws.Volumes
+
+	awsVolume, err := getCloudProvider(plugin.host.GetCloudProvider())
+
+	if err != nil {
+		return oldSize, err
+	}
+	// we don't expect to receive this call for non PVs
+	rawVolumeName := spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID
+	volumeID := aws.KubernetesVolumeID(rawVolumeName)
+
+	if volumeID == "" {
+		return oldSize, fmt.Errorf("EBS.ExpandVolumeDevice Invalid volume id for %s", spec.Name())
+	}
+	return awsVolume.ResizeDisk(volumeID, oldSize, newSize)
+}
+
+var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{}
+
 // Abstract interface to PD operations.
 type ebsManager interface {
 	CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error)
@@ -63,6 +63,12 @@ func TestCanSupport(t *testing.T) {
 	if plugin.GetPluginName() != downwardAPIPluginName {
 		t.Errorf("Wrong name: %s", plugin.GetPluginName())
 	}
+	if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{DownwardAPI: &v1.DownwardAPIVolumeSource{}}}}) {
+		t.Errorf("Expected true")
+	}
+	if plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
+		t.Errorf("Expected false")
+	}
 }

 func TestDownwardAPI(t *testing.T) {
@@ -14,6 +14,7 @@ go_library(
         "device_util_unsupported.go",
         "doc.go",
         "error.go",
+        "finalizer.go",
         "fs_unsupported.go",
         "io_util.go",
         "metrics.go",
@@ -61,6 +62,7 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = [
+        "finalizer_test.go",
        "util_test.go",
     ] + select({
         "@io_bazel_rules_go//go/platform:linux_amd64": [
@@ -74,6 +76,7 @@ go_test(
     deps = [
         "//pkg/apis/core/install:go_default_library",
         "//pkg/apis/core/v1/helper:go_default_library",
+        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
pkg/volume/util/finalizer.go (new file, 68 lines)
@@ -0,0 +1,68 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"k8s.io/api/core/v1"
)

const (
	// Name of finalizer on PVCs that have a running pod.
	PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
)

// IsPVCBeingDeleted returns:
// true: in case PVC is being deleted, i.e. ObjectMeta.DeletionTimestamp is set
// false: in case PVC is not being deleted, i.e. ObjectMeta.DeletionTimestamp is nil
func IsPVCBeingDeleted(pvc *v1.PersistentVolumeClaim) bool {
	return pvc.ObjectMeta.DeletionTimestamp != nil
}

// IsProtectionFinalizerPresent returns true in case PVCProtectionFinalizer is
// present among the pvc.Finalizers
func IsProtectionFinalizerPresent(pvc *v1.PersistentVolumeClaim) bool {
	for _, finalizer := range pvc.Finalizers {
		if finalizer == PVCProtectionFinalizer {
			return true
		}
	}
	return false
}

// RemoveProtectionFinalizer returns pvc without PVCProtectionFinalizer in case
// it's present in pvc.Finalizers. It expects that pvc is writable (i.e. is not
// informer's cached copy.)
func RemoveProtectionFinalizer(pvc *v1.PersistentVolumeClaim) {
	newFinalizers := make([]string, 0)
	for _, finalizer := range pvc.Finalizers {
		if finalizer != PVCProtectionFinalizer {
			newFinalizers = append(newFinalizers, finalizer)
		}
	}
	if len(newFinalizers) == 0 {
		// Sanitize for unit tests so we don't need to distinguish empty array
		// and nil.
		newFinalizers = nil
	}
	pvc.Finalizers = newFinalizers
}

// AddProtectionFinalizer adds PVCProtectionFinalizer to pvc. It expects that
// pvc is writable (i.e. is not informer's cached copy.)
func AddProtectionFinalizer(pvc *v1.PersistentVolumeClaim) {
	pvc.Finalizers = append(pvc.Finalizers, PVCProtectionFinalizer)
}
pkg/volume/util/finalizer_test.go (new file, 231 lines)
@@ -0,0 +1,231 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"reflect"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var (
	arbitraryTime = metav1.Date(2017, 11, 1, 14, 28, 47, 0, time.FixedZone("CET", 0))
)

func TestIsPVCBeingDeleted(t *testing.T) {
	tests := []struct {
		pvc  *v1.PersistentVolumeClaim
		want bool
	}{
		{
			pvc: &v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					DeletionTimestamp: nil,
				},
			},
			want: false,
		},
		{
			pvc: &v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					DeletionTimestamp: &arbitraryTime,
				},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		if got := IsPVCBeingDeleted(tt.pvc); got != tt.want {
			t.Errorf("IsPVCBeingDeleted(%v) = %v WANT %v", tt.pvc, got, tt.want)
		}
	}
}

func TestAddProtectionFinalizer(t *testing.T) {
	tests := []struct {
		name string
		pvc  *v1.PersistentVolumeClaim
		want *v1.PersistentVolumeClaim
	}{
		{
			"PVC without finalizer",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pvc",
					Namespace: "ns",
				},
			},
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{PVCProtectionFinalizer},
				},
			},
		},
		{
			"PVC with some finalizers",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer},
				},
			},
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer, PVCProtectionFinalizer},
				},
			},
		},
	}
	for _, test := range tests {
		got := test.pvc.DeepCopy()
		AddProtectionFinalizer(got)
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("Test %q: expected:\n%s\n\ngot:\n%s", test.name, spew.Sdump(test.want), spew.Sdump(got))
		}
	}
}

func TestRemoveProtectionFinalizer(t *testing.T) {
	tests := []struct {
		name string
		pvc  *v1.PersistentVolumeClaim
		want *v1.PersistentVolumeClaim
	}{
		{
			"PVC without finalizer",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pvc",
					Namespace: "ns",
				},
			},
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pvc",
					Namespace: "ns",
				},
			},
		},
		{
			"PVC with finalizer",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{PVCProtectionFinalizer},
				},
			},
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pvc",
					Namespace: "ns",
				},
			},
		},
		{
			"PVC with many finalizers",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer, PVCProtectionFinalizer},
				},
			},
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer},
				},
			},
		},
	}
	for _, test := range tests {
		got := test.pvc.DeepCopy()
		RemoveProtectionFinalizer(got)
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("Test %q: expected:\n%s\n\ngot:\n%s", test.name, spew.Sdump(test.want), spew.Sdump(got))
		}
	}
}

func TestIsProtectionFinalizerPresent(t *testing.T) {
	tests := []struct {
		name string
		pvc  *v1.PersistentVolumeClaim
		want bool
	}{
		{
			"PVC without finalizer",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "pvc",
					Namespace: "ns",
				},
			},
			false,
		},
		{
			"PVC with many unrelated finalizers",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer},
				},
			},
			false,
		},
		{
			"PVC with many finalizers",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{"1", "2", "3", PVCProtectionFinalizer + "suffix", "prefix" + PVCProtectionFinalizer, PVCProtectionFinalizer},
				},
			},
			true,
		},
		{
			"PVC with finalizer",
			&v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "pvc",
					Namespace:  "ns",
					Finalizers: []string{PVCProtectionFinalizer},
				},
			},
			true,
		},
	}
	for _, test := range tests {
		got := IsProtectionFinalizerPresent(test.pvc)
		if got != test.want {
			t.Errorf("Test %q: expected %v, got %v", test.name, test.want, got)
		}
	}
}
@@ -29,6 +29,7 @@ filegroup(
         "//plugin/pkg/admission/noderestriction:all-srcs",
         "//plugin/pkg/admission/persistentvolume/label:all-srcs",
         "//plugin/pkg/admission/persistentvolume/resize:all-srcs",
+        "//plugin/pkg/admission/persistentvolumeclaim/pvcprotection:all-srcs",
         "//plugin/pkg/admission/podnodeselector:all-srcs",
         "//plugin/pkg/admission/podpreset:all-srcs",
         "//plugin/pkg/admission/podtolerationrestriction:all-srcs",
@@ -34,6 +34,7 @@ go_test(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/cloudprovider/providers/aws:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
@@ -21,6 +21,7 @@ import (

 	"fmt"

+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apiserver/pkg/admission"
@@ -67,6 +68,13 @@ func (c *mockVolumes) DisksAreAttached(nodeDisks map[types.NodeName][]aws.Kubern
 	return nil, fmt.Errorf("not implemented")
 }

+func (c *mockVolumes) ResizeDisk(
+	diskName aws.KubernetesVolumeID,
+	oldSize resource.Quantity,
+	newSize resource.Quantity) (resource.Quantity, error) {
+	return oldSize, nil
+}
+
 func mockVolumeFailure(err error) *mockVolumes {
 	return &mockVolumes{volumeLabelsError: err}
 }
@@ -156,6 +156,10 @@ func (pvcr *persistentVolumeClaimResize) checkVolumePlugin(pv *api.PersistentVol
 	if pv.Spec.GCEPersistentDisk != nil {
 		return true
 	}
-	return false
+
+	if pv.Spec.AWSElasticBlockStore != nil {
+		return true
+	}
+
+	return false
 }
@@ -0,0 +1,51 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["admission.go"],
    importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/client/listers/core/internalversion:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubeapiserver/admission:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["admission_test.go"],
    importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection",
    library = ":go_default_library",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@@ -0,0 +1,111 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pvcprotection

import (
    "fmt"
    "io"

    "github.com/golang/glog"

    admission "k8s.io/apiserver/pkg/admission"
    "k8s.io/apiserver/pkg/util/feature"
    api "k8s.io/kubernetes/pkg/apis/core"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
    corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
    "k8s.io/kubernetes/pkg/features"
    kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

const (
    // PluginName is the name of this admission controller plugin
    PluginName = "PVCProtection"
)

// Register registers a plugin
func Register(plugins *admission.Plugins) {
    plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) {
        plugin := newPlugin()
        return plugin, nil
    })
}

// pvcProtectionPlugin holds state for and implements the admission plugin.
type pvcProtectionPlugin struct {
    *admission.Handler
    lister corelisters.PersistentVolumeClaimLister
}

var _ admission.Interface = &pvcProtectionPlugin{}
var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&pvcProtectionPlugin{})

// newPlugin creates a new admission plugin.
func newPlugin() *pvcProtectionPlugin {
    return &pvcProtectionPlugin{
        Handler: admission.NewHandler(admission.Create),
    }
}

func (c *pvcProtectionPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) {
    informer := f.Core().InternalVersion().PersistentVolumeClaims()
    c.lister = informer.Lister()
    c.SetReadyFunc(informer.Informer().HasSynced)
}

// ValidateInitialization ensures lister is set.
func (c *pvcProtectionPlugin) ValidateInitialization() error {
    if c.lister == nil {
        return fmt.Errorf("missing lister")
    }
    return nil
}

// Admit sets finalizer on all PVCs. The finalizer is removed by
// PVCProtectionController when it's not referenced by any pod.
//
// This prevents users from deleting a PVC that's used by a running pod.
func (c *pvcProtectionPlugin) Admit(a admission.Attributes) error {
    if !feature.DefaultFeatureGate.Enabled(features.PVCProtection) {
        return nil
    }

    if a.GetResource().GroupResource() != api.Resource("persistentvolumeclaims") {
        return nil
    }

    if len(a.GetSubresource()) != 0 {
        return nil
    }

    pvc, ok := a.GetObject().(*api.PersistentVolumeClaim)
    // if we can't convert then we don't handle this object so just return
    if !ok {
        return nil
    }

    for _, f := range pvc.Finalizers {
        if f == volumeutil.PVCProtectionFinalizer {
            // Finalizer is already present, nothing to do
            return nil
        }
    }

    glog.V(4).Infof("adding PVC protection finalizer to %s/%s", pvc.Namespace, pvc.Name)
    pvc.Finalizers = append(pvc.Finalizers, volumeutil.PVCProtectionFinalizer)
    return nil
}
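For context, a minimal sketch (not part of this commit) of how the plugin above could be wired into an API server's admission plugin registry. The import path for the pvcprotection package is an assumption.

package exampleregistration

import (
    "k8s.io/apiserver/pkg/admission"

    // Assumed location of the plugin shown above.
    "k8s.io/kubernetes/plugin/pkg/admission/pvcprotection"
)

// registerAdmissionPlugins adds PVCProtection to an admission plugin registry.
// The plugin only reacts to CREATE of persistentvolumeclaims and is a no-op
// unless the PVCProtection feature gate is enabled.
func registerAdmissionPlugins(plugins *admission.Plugins) {
    pvcprotection.Register(plugins)
}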
@@ -0,0 +1,106 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pvcprotection

import (
    "fmt"
    "reflect"
    "testing"

    "github.com/davecgh/go-spew/spew"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apiserver/pkg/admission"
    "k8s.io/apiserver/pkg/util/feature"
    api "k8s.io/kubernetes/pkg/apis/core"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
    "k8s.io/kubernetes/pkg/controller"
    volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func TestAdmit(t *testing.T) {
    claim := &api.PersistentVolumeClaim{
        TypeMeta: metav1.TypeMeta{
            Kind: "PersistentVolumeClaim",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      "claim",
            Namespace: "ns",
        },
    }
    claimWithFinalizer := claim.DeepCopy()
    claimWithFinalizer.Finalizers = []string{volumeutil.PVCProtectionFinalizer}

    tests := []struct {
        name           string
        object         runtime.Object
        expectedObject runtime.Object
        featureEnabled bool
    }{
        {
            "create -> add finalizer",
            claim,
            claimWithFinalizer,
            true,
        },
        {
            "finalizer already exists -> no new finalizer",
            claimWithFinalizer,
            claimWithFinalizer,
            true,
        },
        {
            "disabled feature -> no finalizer",
            claim,
            claim,
            false,
        },
    }

    ctrl := newPlugin()
    informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc())
    ctrl.SetInternalKubeInformerFactory(informerFactory)

    for _, test := range tests {
        feature.DefaultFeatureGate.Set(fmt.Sprintf("PVCProtection=%v", test.featureEnabled))
        obj := test.object.DeepCopyObject()
        attrs := admission.NewAttributesRecord(
            obj,                  // new object
            obj.DeepCopyObject(), // old object, copy to be sure it's not modified
            api.Kind("PersistentVolumeClaim").WithVersion("version"),
            claim.Namespace,
            claim.Name,
            api.Resource("persistentvolumeclaims").WithVersion("version"),
            "", // subresource
            admission.Create,
            nil, // userInfo
        )

        err := ctrl.Admit(attrs)
        if err != nil {
            t.Errorf("Test %q: got unexpected error: %v", test.name, err)
        }
        if !reflect.DeepEqual(test.expectedObject, obj) {
            t.Errorf("Test %q: Expected object:\n%s\ngot:\n%s", test.name, spew.Sdump(test.expectedObject), spew.Sdump(obj))
        }
    }

    // Disable the feature for rest of the tests.
    // TODO: remove after alpha
    feature.DefaultFeatureGate.Set("PVCProtection=false")
}
@@ -315,6 +315,16 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) {
 			eventsRule(),
 		},
 	})
+	if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) {
+		addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"},
+			Rules: []rbac.PolicyRule{
+				rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+				rbac.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+				eventsRule(),
+			},
+		})
+	}

 	return controllerRoles, controllerRoleBindings
 }
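As a rough sketch (not part of the commit) of what those helper calls expand to; the internal rbac import path, the PolicyRule field names, and "system:controller:" as the value of saRolePrefix are assumptions here.

package examplerbac

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    rbac "k8s.io/kubernetes/pkg/apis/rbac"
)

// pvcProtectionControllerRole is a hypothetical expansion of the feature-gated
// role added above: read/update on persistentvolumeclaims, read on pods.
var pvcProtectionControllerRole = rbac.ClusterRole{
    ObjectMeta: metav1.ObjectMeta{Name: "system:controller:pvc-protection-controller"},
    Rules: []rbac.PolicyRule{
        {Verbs: []string{"get", "list", "watch", "update"}, APIGroups: []string{""}, Resources: []string{"persistentvolumeclaims"}},
        {Verbs: []string{"list", "watch", "get"}, APIGroups: []string{""}, Resources: []string{"pods"}},
        // The real role also appends the shared eventsRule().
    },
}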
@@ -25,6 +25,7 @@ limitations under the License.
 		k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto

 	It has these top-level messages:
+		AllowedFlexVolume
 		AllowedHostPath
 		CustomMetricCurrentStatus
 		CustomMetricCurrentStatusList
@@ -113,253 +114,258 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

+func (m *AllowedFlexVolume) Reset()      { *m = AllowedFlexVolume{} }
+func (*AllowedFlexVolume) ProtoMessage() {}
+func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
+
 func (m *AllowedHostPath) Reset()      { *m = AllowedHostPath{} }
 func (*AllowedHostPath) ProtoMessage() {}
-func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
+func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} }

 func (m *CustomMetricCurrentStatus) Reset()      { *m = CustomMetricCurrentStatus{} }
 func (*CustomMetricCurrentStatus) ProtoMessage() {}
 func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{1}
+	return fileDescriptorGenerated, []int{2}
 }

 func (m *CustomMetricCurrentStatusList) Reset()      { *m = CustomMetricCurrentStatusList{} }
 func (*CustomMetricCurrentStatusList) ProtoMessage() {}
 func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{2}
+	return fileDescriptorGenerated, []int{3}
 }

 func (m *CustomMetricTarget) Reset()      { *m = CustomMetricTarget{} }
 func (*CustomMetricTarget) ProtoMessage() {}
-func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} }
+func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }

 func (m *CustomMetricTargetList) Reset()      { *m = CustomMetricTargetList{} }
 func (*CustomMetricTargetList) ProtoMessage() {}
-func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} }
+func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} }

 func (m *DaemonSet) Reset()      { *m = DaemonSet{} }
 func (*DaemonSet) ProtoMessage() {}
-func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} }
+func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} }

 func (m *DaemonSetCondition) Reset()      { *m = DaemonSetCondition{} }
 func (*DaemonSetCondition) ProtoMessage() {}
-func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} }
+func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} }

 func (m *DaemonSetList) Reset()      { *m = DaemonSetList{} }
 func (*DaemonSetList) ProtoMessage() {}
-func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} }
+func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} }

 func (m *DaemonSetSpec) Reset()      { *m = DaemonSetSpec{} }
 func (*DaemonSetSpec) ProtoMessage() {}
-func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} }
+func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} }

 func (m *DaemonSetStatus) Reset()      { *m = DaemonSetStatus{} }
 func (*DaemonSetStatus) ProtoMessage() {}
-func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} }
+func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} }

 func (m *DaemonSetUpdateStrategy) Reset()      { *m = DaemonSetUpdateStrategy{} }
 func (*DaemonSetUpdateStrategy) ProtoMessage() {}
 func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{10}
+	return fileDescriptorGenerated, []int{11}
 }

 func (m *Deployment) Reset()      { *m = Deployment{} }
 func (*Deployment) ProtoMessage() {}
-func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} }
+func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} }

 func (m *DeploymentCondition) Reset()      { *m = DeploymentCondition{} }
 func (*DeploymentCondition) ProtoMessage() {}
-func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} }
+func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} }

 func (m *DeploymentList) Reset()      { *m = DeploymentList{} }
 func (*DeploymentList) ProtoMessage() {}
-func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} }
+func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }

 func (m *DeploymentRollback) Reset()      { *m = DeploymentRollback{} }
 func (*DeploymentRollback) ProtoMessage() {}
-func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }
+func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }

 func (m *DeploymentSpec) Reset()      { *m = DeploymentSpec{} }
 func (*DeploymentSpec) ProtoMessage() {}
-func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }
+func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} }

 func (m *DeploymentStatus) Reset()      { *m = DeploymentStatus{} }
 func (*DeploymentStatus) ProtoMessage() {}
-func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} }
+func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} }

 func (m *DeploymentStrategy) Reset()      { *m = DeploymentStrategy{} }
 func (*DeploymentStrategy) ProtoMessage() {}
-func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} }
+func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} }

 func (m *FSGroupStrategyOptions) Reset()      { *m = FSGroupStrategyOptions{} }
 func (*FSGroupStrategyOptions) ProtoMessage() {}
-func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} }
+func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }

 func (m *HTTPIngressPath) Reset()      { *m = HTTPIngressPath{} }
 func (*HTTPIngressPath) ProtoMessage() {}
-func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} }
+func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} }

 func (m *HTTPIngressRuleValue) Reset()      { *m = HTTPIngressRuleValue{} }
 func (*HTTPIngressRuleValue) ProtoMessage() {}
-func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} }
+func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }

 func (m *HostPortRange) Reset()      { *m = HostPortRange{} }
 func (*HostPortRange) ProtoMessage() {}
-func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} }
+func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }

 func (m *IDRange) Reset()      { *m = IDRange{} }
 func (*IDRange) ProtoMessage() {}
-func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} }
+func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} }

 func (m *IPBlock) Reset()      { *m = IPBlock{} }
 func (*IPBlock) ProtoMessage() {}
-func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} }
+func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }

 func (m *Ingress) Reset()      { *m = Ingress{} }
 func (*Ingress) ProtoMessage() {}
-func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} }
+func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }

 func (m *IngressBackend) Reset()      { *m = IngressBackend{} }
 func (*IngressBackend) ProtoMessage() {}
-func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} }
+func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }

 func (m *IngressList) Reset()      { *m = IngressList{} }
 func (*IngressList) ProtoMessage() {}
-func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} }
+func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }

 func (m *IngressRule) Reset()      { *m = IngressRule{} }
 func (*IngressRule) ProtoMessage() {}
-func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} }
+func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} }

 func (m *IngressRuleValue) Reset()      { *m = IngressRuleValue{} }
 func (*IngressRuleValue) ProtoMessage() {}
-func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} }
+func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} }

 func (m *IngressSpec) Reset()      { *m = IngressSpec{} }
 func (*IngressSpec) ProtoMessage() {}
-func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} }
+func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} }

 func (m *IngressStatus) Reset()      { *m = IngressStatus{} }
 func (*IngressStatus) ProtoMessage() {}
-func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} }
+func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} }

 func (m *IngressTLS) Reset()      { *m = IngressTLS{} }
 func (*IngressTLS) ProtoMessage() {}
-func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} }
+func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }

 func (m *NetworkPolicy) Reset()      { *m = NetworkPolicy{} }
 func (*NetworkPolicy) ProtoMessage() {}
-func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} }
+func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} }

 func (m *NetworkPolicyEgressRule) Reset()      { *m = NetworkPolicyEgressRule{} }
 func (*NetworkPolicyEgressRule) ProtoMessage() {}
 func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{33}
+	return fileDescriptorGenerated, []int{34}
 }

 func (m *NetworkPolicyIngressRule) Reset()      { *m = NetworkPolicyIngressRule{} }
 func (*NetworkPolicyIngressRule) ProtoMessage() {}
 func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{34}
+	return fileDescriptorGenerated, []int{35}
 }

 func (m *NetworkPolicyList) Reset()      { *m = NetworkPolicyList{} }
 func (*NetworkPolicyList) ProtoMessage() {}
-func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} }
+func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }

 func (m *NetworkPolicyPeer) Reset()      { *m = NetworkPolicyPeer{} }
 func (*NetworkPolicyPeer) ProtoMessage() {}
-func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} }
+func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} }

 func (m *NetworkPolicyPort) Reset()      { *m = NetworkPolicyPort{} }
 func (*NetworkPolicyPort) ProtoMessage() {}
-func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} }
+func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} }

 func (m *NetworkPolicySpec) Reset()      { *m = NetworkPolicySpec{} }
 func (*NetworkPolicySpec) ProtoMessage() {}
-func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} }
+func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} }

 func (m *PodSecurityPolicy) Reset()      { *m = PodSecurityPolicy{} }
 func (*PodSecurityPolicy) ProtoMessage() {}
-func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} }
+func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} }

 func (m *PodSecurityPolicyList) Reset()      { *m = PodSecurityPolicyList{} }
 func (*PodSecurityPolicyList) ProtoMessage() {}
-func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} }
+func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} }

 func (m *PodSecurityPolicySpec) Reset()      { *m = PodSecurityPolicySpec{} }
 func (*PodSecurityPolicySpec) ProtoMessage() {}
-func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} }
+func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} }

 func (m *ReplicaSet) Reset()      { *m = ReplicaSet{} }
 func (*ReplicaSet) ProtoMessage() {}
-func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} }
+func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} }

 func (m *ReplicaSetCondition) Reset()      { *m = ReplicaSetCondition{} }
 func (*ReplicaSetCondition) ProtoMessage() {}
-func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} }
+func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} }

 func (m *ReplicaSetList) Reset()      { *m = ReplicaSetList{} }
 func (*ReplicaSetList) ProtoMessage() {}
-func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} }
+func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} }

 func (m *ReplicaSetSpec) Reset()      { *m = ReplicaSetSpec{} }
 func (*ReplicaSetSpec) ProtoMessage() {}
-func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} }
+func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} }

 func (m *ReplicaSetStatus) Reset()      { *m = ReplicaSetStatus{} }
 func (*ReplicaSetStatus) ProtoMessage() {}
-func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} }
+func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} }

 func (m *ReplicationControllerDummy) Reset()      { *m = ReplicationControllerDummy{} }
 func (*ReplicationControllerDummy) ProtoMessage() {}
 func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{47}
+	return fileDescriptorGenerated, []int{48}
 }

 func (m *RollbackConfig) Reset()      { *m = RollbackConfig{} }
 func (*RollbackConfig) ProtoMessage() {}
-func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} }
+func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} }

 func (m *RollingUpdateDaemonSet) Reset()      { *m = RollingUpdateDaemonSet{} }
 func (*RollingUpdateDaemonSet) ProtoMessage() {}
-func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} }
+func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} }

 func (m *RollingUpdateDeployment) Reset()      { *m = RollingUpdateDeployment{} }
 func (*RollingUpdateDeployment) ProtoMessage() {}
 func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{50}
+	return fileDescriptorGenerated, []int{51}
 }

 func (m *RunAsUserStrategyOptions) Reset()      { *m = RunAsUserStrategyOptions{} }
 func (*RunAsUserStrategyOptions) ProtoMessage() {}
 func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{51}
+	return fileDescriptorGenerated, []int{52}
 }

 func (m *SELinuxStrategyOptions) Reset()      { *m = SELinuxStrategyOptions{} }
 func (*SELinuxStrategyOptions) ProtoMessage() {}
-func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} }
+func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} }

 func (m *Scale) Reset()      { *m = Scale{} }
 func (*Scale) ProtoMessage() {}
-func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} }
+func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} }

 func (m *ScaleSpec) Reset()      { *m = ScaleSpec{} }
 func (*ScaleSpec) ProtoMessage() {}
-func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} }
+func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} }

 func (m *ScaleStatus) Reset()      { *m = ScaleStatus{} }
 func (*ScaleStatus) ProtoMessage() {}
-func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} }
+func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} }

 func (m *SupplementalGroupsStrategyOptions) Reset()      { *m = SupplementalGroupsStrategyOptions{} }
 func (*SupplementalGroupsStrategyOptions) ProtoMessage() {}
 func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptorGenerated, []int{56}
+	return fileDescriptorGenerated, []int{57}
 }

 func init() {
+	proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume")
 	proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath")
 	proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus")
 	proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList")
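The wholesale renumbering above is mechanical: a message's Descriptor index is simply its position in the regenerated file descriptor, and AllowedFlexVolume now occupies position 0, shifting every later message by one. A small standalone sketch (not part of the generated file) of that shift:

package main

import "fmt"

func main() {
    // Before the change, AllowedHostPath was message 0, CustomMetricCurrentStatus 1, and so on.
    oldOrder := []string{"AllowedHostPath", "CustomMetricCurrentStatus", "CustomMetricCurrentStatusList"}
    // Inserting AllowedFlexVolume at the front pushes every later index up by one.
    newOrder := append([]string{"AllowedFlexVolume"}, oldOrder...)
    for i, name := range newOrder {
        fmt.Printf("%s -> []int{%d}\n", name, i)
    }
}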
@@ -418,6 +424,28 @@ func init() {
 	proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus")
 	proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions")
 }
+func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i += copy(dAtA[i:], m.Driver)
+	return i, nil
+}
+
 func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -2207,6 +2235,20 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) {
 			i += n
 		}
 	}
+	if len(m.AllowedFlexVolumes) > 0 {
+		for _, msg := range m.AllowedFlexVolumes {
+			dAtA[i] = 0x92
+			i++
+			dAtA[i] = 0x1
+			i++
+			i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
 	return i, nil
 }
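A minimal standalone sketch (not from the generated file) of where the two raw tag bytes 0x92, 0x1 written above come from: protobuf encodes the key of field 18 with wire type 2 (length-delimited) as the varint of 18<<3|2 = 146, which takes two bytes.

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    key := uint64(18<<3 | 2) // field number 18, wire type 2 (length-delimited)
    buf := make([]byte, binary.MaxVarintLen64)
    n := binary.PutUvarint(buf, key)
    fmt.Printf("% x\n", buf[:n]) // prints: 92 01
}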
@@ -2763,6 +2805,14 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
 	dAtA[offset] = uint8(v)
 	return offset + 1
 }
+func (m *AllowedFlexVolume) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
 func (m *AllowedHostPath) Size() (n int) {
 	var l int
 	_ = l
@@ -3401,6 +3451,12 @@ func (m *PodSecurityPolicySpec) Size() (n int) {
 			n += 2 + l + sovGenerated(uint64(l))
 		}
 	}
+	if len(m.AllowedFlexVolumes) > 0 {
+		for _, e := range m.AllowedFlexVolumes {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
 	return n
 }
@@ -3605,6 +3661,16 @@ func sovGenerated(x uint64) (n int) {
 func sozGenerated(x uint64) (n int) {
 	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
+func (this *AllowedFlexVolume) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AllowedFlexVolume{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *AllowedHostPath) String() string {
 	if this == nil {
 		return "nil"
@@ -4122,6 +4188,7 @@ func (this *PodSecurityPolicySpec) String() string {
 		`DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`,
 		`AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`,
 		`AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`,
+		`AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -4316,6 +4383,85 @@ func valueToStringGenerated(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 }
+func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *AllowedHostPath) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -10129,6 +10275,37 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 18:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{})
+			if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
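For a quick end-to-end sanity check of the new field, a minimal sketch (not part of the commit) that exercises the generated Marshal/Unmarshal paths above; the k8s.io/api/extensions/v1beta1 import path and the example driver name are assumptions.

package main

import (
    "fmt"

    extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
    spec := extensionsv1beta1.PodSecurityPolicySpec{
        AllowedFlexVolumes: []extensionsv1beta1.AllowedFlexVolume{
            {Driver: "example.com/lvm"}, // hypothetical Flexvolume driver name
        },
    }
    data, err := spec.Marshal() // generated gogo marshaller (field 18, wire type 2)
    if err != nil {
        panic(err)
    }
    var decoded extensionsv1beta1.PodSecurityPolicySpec
    if err := decoded.Unmarshal(data); err != nil {
        panic(err)
    }
    fmt.Println(decoded.AllowedFlexVolumes[0].Driver) // example.com/lvm
}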
@@ -12108,226 +12285,229 @@ func init() {
 }

 var fileDescriptorGenerated = []byte{
-	// 3523 bytes of a gzipped FileDescriptorProto
+	// 3571 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47,
|
||||||
0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x28, 0x93, 0x63, 0xca, 0xe2, 0xc8, 0x6d,
|
0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x28, 0x93, 0x63, 0xca, 0xe2, 0xc8, 0x6d,
|
||||||
0x40, 0x91, 0x1c, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0x3b, 0x1c, 0x52, 0x1f, 0x74, 0xf8,
|
0x40, 0x91, 0x1c, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0x3b, 0x1c, 0x52, 0x1f, 0x74, 0xf8,
|
||||||
0xa5, 0x1a, 0x52, 0x71, 0x8c, 0xc8, 0x71, 0x73, 0xa6, 0x38, 0x6c, 0xb1, 0xa7, 0xbb, 0xdd, 0x5d,
|
0xa5, 0x1a, 0x52, 0x71, 0x8c, 0xc8, 0x71, 0x73, 0xa6, 0x38, 0x6c, 0xb1, 0xa7, 0xbb, 0xdd, 0x5d,
|
||||||
0x4d, 0x73, 0x2e, 0x41, 0x0e, 0x41, 0x80, 0x00, 0x09, 0x92, 0x1c, 0x9c, 0x38, 0xb7, 0xf8, 0x92,
|
0x4d, 0x73, 0x80, 0x20, 0xc8, 0x21, 0x08, 0x10, 0x20, 0x41, 0x92, 0x83, 0xf3, 0x71, 0x8b, 0x2f,
|
||||||
0x53, 0x82, 0xe4, 0x96, 0x1c, 0x0c, 0x03, 0x01, 0x1c, 0x40, 0x08, 0xbc, 0x80, 0x6f, 0xeb, 0x13,
|
0x39, 0x25, 0x48, 0x6e, 0xc9, 0xc1, 0x30, 0x10, 0xc0, 0x0b, 0x08, 0x0b, 0x2f, 0xe0, 0xdb, 0xfa,
|
||||||
0xb1, 0xa6, 0x4f, 0x8b, 0xfd, 0x07, 0x16, 0x3a, 0x2c, 0x16, 0x55, 0x5d, 0xfd, 0xdd, 0xcd, 0x99,
|
0x44, 0xac, 0xe9, 0xd3, 0x62, 0xff, 0x81, 0x85, 0x0e, 0x8b, 0x45, 0x55, 0x57, 0x7f, 0x77, 0x73,
|
||||||
0xa1, 0x25, 0x62, 0xb1, 0xd8, 0x1b, 0xa7, 0xde, 0x7b, 0xbf, 0xf7, 0xea, 0xd5, 0xab, 0xf7, 0x5e,
|
0x66, 0x68, 0x89, 0x58, 0x2c, 0xf6, 0xc6, 0xa9, 0xf7, 0xde, 0xef, 0xbd, 0x7a, 0xf5, 0xea, 0xbd,
|
||||||
0x57, 0x15, 0xe1, 0xde, 0xde, 0x6d, 0xbb, 0xaa, 0x1a, 0xb5, 0x3d, 0x67, 0x9b, 0x58, 0x3a, 0xa1,
|
0xd7, 0x55, 0x45, 0xb8, 0xb7, 0x77, 0xdb, 0xae, 0xaa, 0x46, 0x6d, 0xcf, 0xd9, 0x26, 0x96, 0x4e,
|
||||||
0xc4, 0xae, 0xed, 0x13, 0xbd, 0x65, 0x58, 0x35, 0x41, 0x50, 0x4c, 0xb5, 0x46, 0x0e, 0x28, 0xd1,
|
0x28, 0xb1, 0x6b, 0xfb, 0x44, 0x6f, 0x19, 0x56, 0x4d, 0x10, 0x14, 0x53, 0xad, 0x91, 0x03, 0x4a,
|
||||||
0x6d, 0xd5, 0xd0, 0xed, 0xda, 0xfe, 0xf5, 0x6d, 0x42, 0x95, 0xeb, 0xb5, 0x36, 0xd1, 0x89, 0xa5,
|
0x74, 0x5b, 0x35, 0x74, 0xbb, 0xb6, 0x7f, 0x7d, 0x9b, 0x50, 0xe5, 0x7a, 0xad, 0x4d, 0x74, 0x62,
|
||||||
0x50, 0xd2, 0xaa, 0x9a, 0x96, 0x41, 0x0d, 0x74, 0xc1, 0x65, 0xaf, 0x2a, 0xa6, 0x5a, 0x0d, 0xd8,
|
0x29, 0x94, 0xb4, 0xaa, 0xa6, 0x65, 0x50, 0x03, 0x5d, 0x70, 0xd9, 0xab, 0x8a, 0xa9, 0x56, 0x03,
|
||||||
0xab, 0x82, 0x7d, 0xee, 0x5a, 0x5b, 0xa5, 0xbb, 0xce, 0x76, 0xb5, 0x69, 0x74, 0x6a, 0x6d, 0xa3,
|
0xf6, 0xaa, 0x60, 0x9f, 0xbb, 0xd6, 0x56, 0xe9, 0xae, 0xb3, 0x5d, 0x6d, 0x1a, 0x9d, 0x5a, 0xdb,
|
||||||
0x6d, 0xd4, 0xb8, 0xd4, 0xb6, 0xb3, 0xc3, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x17, 0x6d, 0x4e, 0x0e,
|
0x68, 0x1b, 0x35, 0x2e, 0xb5, 0xed, 0xec, 0xf0, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x45, 0x9b, 0x93,
|
||||||
0x29, 0x6f, 0x1a, 0x16, 0xa9, 0xed, 0x27, 0x34, 0xce, 0x5d, 0x09, 0xf1, 0x98, 0x86, 0xa6, 0x36,
|
0x43, 0xca, 0x9b, 0x86, 0x45, 0x6a, 0xfb, 0x09, 0x8d, 0x73, 0x57, 0x42, 0x3c, 0xa6, 0xa1, 0xa9,
|
||||||
0xbb, 0x59, 0xc6, 0xcd, 0xbd, 0x15, 0xb0, 0x76, 0x94, 0xe6, 0xae, 0xaa, 0x13, 0xab, 0x5b, 0x33,
|
0xcd, 0x6e, 0x96, 0x71, 0x73, 0x6f, 0x05, 0xac, 0x1d, 0xa5, 0xb9, 0xab, 0xea, 0xc4, 0xea, 0xd6,
|
||||||
0xf7, 0xda, 0x5c, 0xd6, 0x22, 0xb6, 0xe1, 0x58, 0x4d, 0x32, 0x90, 0x94, 0x5d, 0xeb, 0x10, 0xaa,
|
0xcc, 0xbd, 0x36, 0x97, 0xb5, 0x88, 0x6d, 0x38, 0x56, 0x93, 0x0c, 0x24, 0x65, 0xd7, 0x3a, 0x84,
|
||||||
0xa4, 0x99, 0x55, 0xcb, 0x92, 0xb2, 0x1c, 0x9d, 0xaa, 0x9d, 0xa4, 0x9a, 0x5b, 0xbd, 0x04, 0xec,
|
0x2a, 0x69, 0x66, 0xd5, 0xb2, 0xa4, 0x2c, 0x47, 0xa7, 0x6a, 0x27, 0xa9, 0xe6, 0x56, 0x2f, 0x01,
|
||||||
0xe6, 0x2e, 0xe9, 0x28, 0x09, 0xb9, 0x37, 0xb3, 0xe4, 0x1c, 0xaa, 0x6a, 0x35, 0x55, 0xa7, 0x36,
|
0xbb, 0xb9, 0x4b, 0x3a, 0x4a, 0x42, 0xee, 0xcd, 0x2c, 0x39, 0x87, 0xaa, 0x5a, 0x4d, 0xd5, 0xa9,
|
||||||
0xb5, 0xe2, 0x42, 0xf2, 0x5d, 0x98, 0x58, 0xd0, 0x34, 0xe3, 0x53, 0xd2, 0x7a, 0x60, 0xd8, 0x74,
|
0x4d, 0xad, 0xb8, 0x90, 0x7c, 0x07, 0xa6, 0x16, 0x34, 0xcd, 0xf8, 0x94, 0xb4, 0xee, 0x69, 0xe4,
|
||||||
0x43, 0xa1, 0xbb, 0xe8, 0x06, 0x80, 0xa9, 0xd0, 0xdd, 0x0d, 0x8b, 0xec, 0xa8, 0x07, 0x65, 0xe9,
|
0xe0, 0x91, 0xa1, 0x39, 0x1d, 0x82, 0x2e, 0xc1, 0x70, 0xcb, 0x52, 0xf7, 0x89, 0x55, 0x96, 0x2e,
|
||||||
0xa2, 0x74, 0x79, 0xa4, 0x8e, 0x9e, 0x1e, 0x56, 0xce, 0x1c, 0x1d, 0x56, 0x60, 0xc3, 0xa7, 0xe0,
|
0x4a, 0x97, 0x47, 0xea, 0xe3, 0x4f, 0x0f, 0x2b, 0x67, 0x8e, 0x0e, 0x2b, 0xc3, 0x4b, 0x7c, 0x14,
|
||||||
0x10, 0x97, 0xfc, 0x2f, 0x12, 0xbc, 0xbc, 0xe8, 0xd8, 0xd4, 0xe8, 0xac, 0x12, 0x6a, 0xa9, 0xcd,
|
0x0b, 0xaa, 0x7c, 0x17, 0x26, 0x84, 0xf0, 0x03, 0xc3, 0xa6, 0x1b, 0x0a, 0xdd, 0x45, 0x37, 0x00,
|
||||||
0x45, 0xc7, 0xb2, 0x88, 0x4e, 0x1b, 0x54, 0xa1, 0x8e, 0x8d, 0x2e, 0x42, 0x41, 0x57, 0x3a, 0x44,
|
0x4c, 0x85, 0xee, 0x6e, 0x58, 0x64, 0x47, 0x3d, 0x10, 0xe2, 0x48, 0x88, 0xc3, 0x86, 0x4f, 0xc1,
|
||||||
0x60, 0x9d, 0x15, 0x58, 0x85, 0x35, 0xa5, 0x43, 0x30, 0xa7, 0xa0, 0x0f, 0x61, 0x68, 0x5f, 0xd1,
|
0x21, 0x2e, 0xf9, 0xdf, 0x24, 0x78, 0x79, 0xd1, 0xb1, 0xa9, 0xd1, 0x59, 0x25, 0xd4, 0x52, 0x9b,
|
||||||
0x1c, 0x52, 0xce, 0x5d, 0x94, 0x2e, 0x8f, 0xde, 0xa8, 0x56, 0x83, 0xe8, 0xf1, 0xe7, 0x52, 0x35,
|
0x8b, 0x8e, 0x65, 0x11, 0x9d, 0x36, 0xa8, 0x42, 0x1d, 0x1b, 0x5d, 0x84, 0x82, 0xae, 0x74, 0x88,
|
||||||
0xf7, 0xda, 0x3c, 0x9c, 0xbc, 0x05, 0xaa, 0x3e, 0x74, 0x14, 0x9d, 0xaa, 0xb4, 0x5b, 0x3f, 0x27,
|
0xc0, 0x3a, 0x2b, 0xb0, 0x0a, 0x6b, 0x4a, 0x87, 0x60, 0x4e, 0x41, 0x1f, 0xc2, 0xd0, 0xbe, 0xa2,
|
||||||
0x20, 0xcf, 0x0a, 0xbd, 0x8f, 0x18, 0x16, 0x76, 0x21, 0xe5, 0xbf, 0x80, 0x0b, 0x99, 0xa6, 0xad,
|
0x39, 0xa4, 0x9c, 0xbb, 0x28, 0x5d, 0x1e, 0xbd, 0x51, 0xad, 0x06, 0xa1, 0xe7, 0x3b, 0xa2, 0x6a,
|
||||||
0xa8, 0x36, 0x45, 0x8f, 0x61, 0x48, 0xa5, 0xa4, 0x63, 0x97, 0xa5, 0x8b, 0xf9, 0xcb, 0xa3, 0x37,
|
0xee, 0xb5, 0x79, 0x2c, 0x7a, 0xab, 0x5b, 0x7d, 0xe8, 0x28, 0x3a, 0x55, 0x69, 0xb7, 0x7e, 0x4e,
|
||||||
0x6e, 0x57, 0x8f, 0x0d, 0xdd, 0x6a, 0x26, 0x58, 0x7d, 0x4c, 0x98, 0x31, 0xb4, 0xcc, 0xe0, 0xb0,
|
0x40, 0x9e, 0x15, 0x7a, 0x1f, 0x31, 0x2c, 0xec, 0x42, 0xca, 0x7f, 0x09, 0x17, 0x32, 0x4d, 0x5b,
|
||||||
0x8b, 0x2a, 0xff, 0xa3, 0x04, 0x28, 0x2c, 0xb3, 0xa9, 0x58, 0x6d, 0x42, 0xfb, 0x70, 0xca, 0x9f,
|
0x51, 0x6d, 0x8a, 0x1e, 0xc3, 0x90, 0x4a, 0x49, 0xc7, 0x2e, 0x4b, 0x17, 0xf3, 0x97, 0x47, 0x6f,
|
||||||
0xfe, 0x38, 0xa7, 0x4c, 0x0b, 0xc8, 0x51, 0x57, 0x61, 0xc4, 0x27, 0x26, 0xcc, 0x24, 0x4d, 0xe2,
|
0xdc, 0xae, 0x1e, 0x1b, 0xf7, 0xd5, 0x4c, 0xb0, 0xfa, 0x98, 0x30, 0x63, 0x68, 0x99, 0xc1, 0x61,
|
||||||
0xce, 0x78, 0x14, 0x75, 0xc6, 0xf5, 0x01, 0x9c, 0xe1, 0xa2, 0x64, 0x78, 0xe1, 0xb3, 0x1c, 0x8c,
|
0x17, 0x55, 0xfe, 0x27, 0x09, 0x50, 0x58, 0x66, 0x53, 0xb1, 0xda, 0x84, 0xf6, 0xe1, 0x94, 0x3f,
|
||||||
0x2c, 0x29, 0xa4, 0x63, 0xe8, 0x0d, 0x42, 0xd1, 0xc7, 0x50, 0x62, 0xfb, 0xa5, 0xa5, 0x50, 0x85,
|
0xfd, 0x61, 0x4e, 0x99, 0x16, 0x90, 0xa3, 0xae, 0xc2, 0x88, 0x4f, 0x4c, 0x98, 0x49, 0x9a, 0xc4,
|
||||||
0x3b, 0x60, 0xf4, 0xc6, 0x1b, 0xc7, 0xcd, 0xce, 0xae, 0x32, 0xee, 0xea, 0xfe, 0xf5, 0xea, 0xfa,
|
0x9d, 0xf1, 0x28, 0xea, 0x8c, 0xeb, 0x03, 0x38, 0xc3, 0x45, 0xc9, 0xf0, 0xc2, 0x67, 0x39, 0x18,
|
||||||
0xf6, 0x13, 0xd2, 0xa4, 0xab, 0x84, 0x2a, 0x41, 0x4c, 0x06, 0x63, 0xd8, 0x47, 0x45, 0x6b, 0x50,
|
0x59, 0x52, 0x48, 0xc7, 0xd0, 0x1b, 0x84, 0xa2, 0x8f, 0xa1, 0xc4, 0x36, 0x5b, 0x4b, 0xa1, 0x0a,
|
||||||
0xb0, 0x4d, 0xd2, 0x14, 0xbe, 0xbb, 0xda, 0x63, 0x1a, 0xbe, 0x65, 0x0d, 0x93, 0x34, 0x83, 0xc5,
|
0x77, 0xc0, 0xe8, 0x8d, 0x37, 0x8e, 0x9b, 0x9d, 0x5d, 0x65, 0xdc, 0xd5, 0xfd, 0xeb, 0xd5, 0xf5,
|
||||||
0x60, 0xbf, 0x30, 0xc7, 0x41, 0x8f, 0x60, 0xd8, 0xe6, 0xab, 0x5c, 0xce, 0x27, 0x56, 0xe3, 0x78,
|
0xed, 0x27, 0xa4, 0x49, 0x57, 0x09, 0x55, 0x82, 0x98, 0x0c, 0xc6, 0xb0, 0x8f, 0x8a, 0xd6, 0xa0,
|
||||||
0x44, 0x37, 0x36, 0xc6, 0x05, 0xe6, 0xb0, 0xfb, 0x1b, 0x0b, 0x34, 0xf9, 0xe7, 0x39, 0x40, 0x3e,
|
0x60, 0x9b, 0xa4, 0x29, 0x7c, 0x77, 0xb5, 0xc7, 0x34, 0x7c, 0xcb, 0x1a, 0x26, 0x69, 0x06, 0x8b,
|
||||||
0xef, 0xa2, 0xa1, 0xb7, 0x54, 0xaa, 0x1a, 0x3a, 0x7a, 0x07, 0x0a, 0xb4, 0x6b, 0x7a, 0xd1, 0x71,
|
0xc1, 0x7e, 0x61, 0x8e, 0x83, 0x1e, 0xc1, 0xb0, 0xcd, 0x57, 0xb9, 0x9c, 0x4f, 0xac, 0xc6, 0xf1,
|
||||||
0xc9, 0x33, 0x68, 0xb3, 0x6b, 0x92, 0x67, 0x87, 0x95, 0x99, 0xa4, 0x04, 0xa3, 0x60, 0x2e, 0x83,
|
0x88, 0x6e, 0x6c, 0xf8, 0x1b, 0xd0, 0xfd, 0x8d, 0x05, 0x9a, 0xfc, 0xf3, 0x1c, 0x20, 0x9f, 0x77,
|
||||||
0x56, 0x7c, 0x53, 0x73, 0x5c, 0xfa, 0xad, 0xa8, 0xea, 0x67, 0x87, 0x95, 0x94, 0x74, 0x5a, 0xf5,
|
0xd1, 0xd0, 0x5b, 0x2a, 0x55, 0x0d, 0x1d, 0xbd, 0x03, 0x05, 0xda, 0x35, 0xbd, 0xe8, 0xb8, 0xe4,
|
||||||
0x91, 0xa2, 0x06, 0xa2, 0x7d, 0x40, 0x9a, 0x62, 0xd3, 0x4d, 0x4b, 0xd1, 0x6d, 0x57, 0x93, 0xda,
|
0x19, 0xb4, 0xd9, 0x35, 0xc9, 0xb3, 0xc3, 0xca, 0x4c, 0x52, 0x82, 0x51, 0x30, 0x97, 0x41, 0x2b,
|
||||||
0x21, 0xc2, 0x09, 0xaf, 0xf7, 0xb7, 0x68, 0x4c, 0xa2, 0x3e, 0x27, 0xac, 0x40, 0x2b, 0x09, 0x34,
|
0xbe, 0xa9, 0x39, 0x2e, 0xfd, 0x56, 0x54, 0xf5, 0xb3, 0xc3, 0x4a, 0x4a, 0x2e, 0xae, 0xfa, 0x48,
|
||||||
0x9c, 0xa2, 0x01, 0x5d, 0x82, 0x61, 0x8b, 0x28, 0xb6, 0xa1, 0x97, 0x0b, 0x7c, 0x16, 0xbe, 0x03,
|
0x51, 0x03, 0xd1, 0x3e, 0x20, 0x4d, 0xb1, 0xe9, 0xa6, 0xa5, 0xe8, 0xb6, 0xab, 0x49, 0xed, 0x10,
|
||||||
0x31, 0x1f, 0xc5, 0x82, 0x8a, 0xae, 0x40, 0xb1, 0x43, 0x6c, 0x5b, 0x69, 0x93, 0xf2, 0x10, 0x67,
|
0xe1, 0x84, 0xd7, 0xfb, 0x5b, 0x34, 0x26, 0x51, 0x9f, 0x13, 0x56, 0xa0, 0x95, 0x04, 0x1a, 0x4e,
|
||||||
0x9c, 0x10, 0x8c, 0xc5, 0x55, 0x77, 0x18, 0x7b, 0x74, 0xf9, 0x4b, 0x09, 0xc6, 0x7c, 0xcf, 0xf1,
|
0xd1, 0xc0, 0x32, 0x98, 0x45, 0x14, 0xdb, 0xd0, 0xcb, 0x85, 0x68, 0x06, 0xc3, 0x7c, 0x14, 0x0b,
|
||||||
0x68, 0xff, 0xb3, 0x44, 0x1c, 0x56, 0xfb, 0x9b, 0x12, 0x93, 0xe6, 0x51, 0x38, 0x29, 0xb4, 0x95,
|
0x2a, 0xba, 0x02, 0xc5, 0x0e, 0xb1, 0x6d, 0xa5, 0x4d, 0xca, 0x43, 0x9c, 0x71, 0x42, 0x30, 0x16,
|
||||||
0xbc, 0x91, 0x50, 0x0c, 0xae, 0x7a, 0x7b, 0x29, 0xc7, 0xf7, 0xd2, 0xe5, 0x7e, 0x43, 0x26, 0x63,
|
0x57, 0xdd, 0x61, 0xec, 0xd1, 0xe5, 0x2f, 0x24, 0x18, 0xf3, 0x3d, 0xc7, 0xa3, 0xfd, 0xcf, 0x12,
|
||||||
0x0b, 0xfd, 0x53, 0x21, 0x64, 0x3e, 0x0b, 0x4d, 0xf4, 0x18, 0x4a, 0x36, 0xd1, 0x48, 0x93, 0x1a,
|
0x71, 0x58, 0xed, 0x6f, 0x4a, 0x4c, 0x9a, 0x47, 0xe1, 0xa4, 0xd0, 0x56, 0xf2, 0x46, 0x42, 0x31,
	[... regenerated protobuf file-descriptor bytes elided: binary descriptor data; the old and new byte blobs were interleaved by the diff viewer ...]
}
@@ -32,6 +32,12 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
 // Package-wide variables from generator "generated".
 option go_package = "v1beta1";
 
+// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+message AllowedFlexVolume {
+  // Driver is the name of the Flexvolume driver.
+  optional string driver = 1;
+}
+
 // defines the host volume conditions that will be enabled by a policy
 // for pods to use. It requires the path prefix to be defined.
 message AllowedHostPath {
@@ -843,6 +849,12 @@ message PodSecurityPolicySpec {
   // is a white list of allowed host paths. Empty indicates that all host paths may be used.
   // +optional
   repeated AllowedHostPath allowedHostPaths = 17;
+
+  // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
+  // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
+  // is allowed in the "Volumes" field.
+  // +optional
+  repeated AllowedFlexVolume allowedFlexVolumes = 18;
 }
 
 // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
@@ -938,6 +938,11 @@ type PodSecurityPolicySpec struct {
 	// is a white list of allowed host paths. Empty indicates that all host paths may be used.
 	// +optional
 	AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"`
+	// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
+	// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
+	// is allowed in the "Volumes" field.
+	// +optional
+	AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"`
 }
 
 // defines the host volume conditions that will be enabled by a policy
@@ -981,6 +986,12 @@ var (
 	All FSType = "*"
 )
 
+// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+type AllowedFlexVolume struct {
+	// Driver is the name of the Flexvolume driver.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+}
+
 // Host Port Range defines a range of host ports that will be enabled by a policy
 // for pods to use. It requires both the start and end to be defined.
 type HostPortRange struct {
@@ -27,6 +27,15 @@ package v1beta1
 // Those methods can be generated by using hack/update-generated-swagger-docs.sh
 
 // AUTO-GENERATED FUNCTIONS START HERE
+var map_AllowedFlexVolume = map[string]string{
+	"":       "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.",
+	"driver": "Driver is the name of the Flexvolume driver.",
+}
+
+func (AllowedFlexVolume) SwaggerDoc() map[string]string {
+	return map_AllowedFlexVolume
+}
+
 var map_AllowedHostPath = map[string]string{
 	"":           "defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.",
 	"pathPrefix": "is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`",
@@ -465,6 +474,7 @@ var map_PodSecurityPolicySpec = map[string]string{
 	"defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.",
 	"allowPrivilegeEscalation":        "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.",
 	"allowedHostPaths":                "is a white list of allowed host paths. Empty indicates that all host paths may be used.",
+	"allowedFlexVolumes":              "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.",
 }
 
 func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
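A small sketch, assuming the k8s.io/api/extensions/v1beta1 package path, of how these generated maps surface at runtime: the SwaggerDoc method simply returns the map keyed by JSON field name.

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	docs := extensionsv1beta1.AllowedFlexVolume{}.SwaggerDoc()
	fmt.Println(docs[""])       // type-level description
	fmt.Println(docs["driver"]) // doc string for the "driver" field
}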
@@ -27,6 +27,22 @@ import (
 	intstr "k8s.io/apimachinery/pkg/util/intstr"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
+func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(AllowedFlexVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
 	*out = *in
@@ -1194,6 +1210,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
 		*out = make([]AllowedHostPath, len(*in))
 		copy(*out, *in)
 	}
+	if in.AllowedFlexVolumes != nil {
+		in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
+		*out = make([]AllowedFlexVolume, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
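A short sketch of the deep-copy behaviour these generated functions provide for the new slice field; the driver strings are placeholders. Mutating the copy must leave the original untouched.

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	spec := extensionsv1beta1.PodSecurityPolicySpec{
		AllowedFlexVolumes: []extensionsv1beta1.AllowedFlexVolume{{Driver: "example/lvm"}}, // placeholder driver
	}

	clone := spec.DeepCopy()
	clone.AllowedFlexVolumes[0].Driver = "example/other"

	fmt.Println(spec.AllowedFlexVolumes[0].Driver)  // still "example/lvm": the slice was deep-copied
	fmt.Println(clone.AllowedFlexVolumes[0].Driver) // "example/other"
}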
@@ -5,16 +5,22 @@ go_library(
     srcs = [
         "controllerrevision.go",
         "daemonset.go",
+        "daemonset_expansion.go",
         "deployment.go",
+        "deployment_expansion.go",
         "expansion_generated.go",
         "replicaset.go",
+        "replicaset_expansion.go",
         "statefulset.go",
+        "statefulset_expansion.go",
     ],
     importpath = "k8s.io/client-go/listers/apps/v1",
     visibility = ["//visibility:public"],
     deps = [
         "//vendor/k8s.io/api/apps/v1:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
     ],
@@ -0,0 +1,113 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// DaemonSetListerExpansion allows custom methods to be added to
// DaemonSetLister.
type DaemonSetListerExpansion interface {
	GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error)
	GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error)
}

// DaemonSetNamespaceListerExpansion allows custom methods to be added to
// DaemonSetNamespaceLister.
type DaemonSetNamespaceListerExpansion interface{}

// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod.
// Only the one specified in the Pod's ControllerRef will actually manage it.
// Returns an error only if no matching DaemonSets are found.
func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) {
	var selector labels.Selector
	var daemonSet *apps.DaemonSet

	if len(pod.Labels) == 0 {
		return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name)
	}

	list, err := s.DaemonSets(pod.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var daemonSets []*apps.DaemonSet
	for i := range list {
		daemonSet = list[i]
		if daemonSet.Namespace != pod.Namespace {
			continue
		}
		selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector)
		if err != nil {
			// this should not happen if the DaemonSet passed validation
			return nil, err
		}

		// If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		daemonSets = append(daemonSets, daemonSet)
	}

	if len(daemonSets) == 0 {
		return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}

	return daemonSets, nil
}

// GetHistoryDaemonSets returns a list of DaemonSets that potentially
// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef
// will actually manage it.
// Returns an error only if no matching DaemonSets are found.
func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) {
	if len(history.Labels) == 0 {
		return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name)
	}

	list, err := s.DaemonSets(history.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var daemonSets []*apps.DaemonSet
	for _, ds := range list {
		selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		// If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) {
			continue
		}
		daemonSets = append(daemonSets, ds)
	}

	if len(daemonSets) == 0 {
		return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels)
	}

	return daemonSets, nil
}
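A self-contained sketch of the selector matching that GetPodDaemonSets above relies on; the object names and labels are made up for illustration. The lister converts each DaemonSet's LabelSelector and treats an empty selector as matching nothing.

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	ds := &apps.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Namespace: "default"},
		Spec: apps.DaemonSetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "node-agent"}},
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "node-agent-abc12",
			Namespace: "default",
			Labels:    map[string]string{"app": "node-agent"},
		},
	}

	// Same conversion and match the lister performs for every DaemonSet in the pod's namespace.
	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
	if err != nil {
		panic(err)
	}
	// Mirror the guard in the lister: an empty selector matches nothing, not everything.
	matches := !selector.Empty() && selector.Matches(labels.Set(pod.Labels))
	fmt.Printf("daemonset %q matches pod %q: %v\n", ds.Name, pod.Name, matches)
}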
@@ -0,0 +1,70 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// DeploymentListerExpansion allows custom methods to be added to
// DeploymentLister.
type DeploymentListerExpansion interface {
	GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error)
}

// DeploymentNamespaceListerExpansion allows custom methods to be added to
// DeploymentNamespaceLister.
type DeploymentNamespaceListerExpansion interface{}

// GetDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef
// will actually manage it.
// Returns an error only if no matching Deployments are found.
func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) {
	if len(rs.Labels) == 0 {
		return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name)
	}

	// TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label
	dList, err := s.Deployments(rs.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var deployments []*apps.Deployment
	for _, d := range dList {
		selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) {
			continue
		}
		deployments = append(deployments, d)
	}

	if len(deployments) == 0 {
		return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels)
	}

	return deployments, nil
}
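A hedged sketch of reaching the new expansion method through a client-go shared informer factory. The kubeconfig flag, the "default" namespace and the resync period are assumptions, and it presumes a client-go release whose apps/v1 listers include these expansions.

package main

import (
	"flag"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := flag.String("kubeconfig", "", "path to a kubeconfig file (assumption)")
	flag.Parse()

	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Shared informer factory; the apps/v1 DeploymentLister it hands out embeds
	// DeploymentListerExpansion, so GetDeploymentsForReplicaSet is available on it.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	deployInformer := factory.Apps().V1().Deployments()
	rsInformer := factory.Apps().V1().ReplicaSets()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	cache.WaitForCacheSync(stop, deployInformer.Informer().HasSynced, rsInformer.Informer().HasSynced)

	rss, err := rsInformer.Lister().ReplicaSets("default").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, rs := range rss {
		owners, err := deployInformer.Lister().GetDeploymentsForReplicaSet(rs)
		if err != nil {
			// The expansion method returns an error when no Deployment matches.
			fmt.Printf("replicaset %s: %v\n", rs.Name, err)
			continue
		}
		fmt.Printf("replicaset %s potentially owned by %d deployment(s)\n", rs.Name, len(owners))
	}
}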
@@ -25,35 +25,3 @@ type ControllerRevisionListerExpansion interface{}
 // ControllerRevisionNamespaceListerExpansion allows custom methods to be added to
 // ControllerRevisionNamespaceLister.
 type ControllerRevisionNamespaceListerExpansion interface{}
-
-// DaemonSetListerExpansion allows custom methods to be added to
-// DaemonSetLister.
-type DaemonSetListerExpansion interface{}
-
-// DaemonSetNamespaceListerExpansion allows custom methods to be added to
-// DaemonSetNamespaceLister.
-type DaemonSetNamespaceListerExpansion interface{}
-
-// DeploymentListerExpansion allows custom methods to be added to
-// DeploymentLister.
-type DeploymentListerExpansion interface{}
-
-// DeploymentNamespaceListerExpansion allows custom methods to be added to
-// DeploymentNamespaceLister.
-type DeploymentNamespaceListerExpansion interface{}
-
-// ReplicaSetListerExpansion allows custom methods to be added to
-// ReplicaSetLister.
-type ReplicaSetListerExpansion interface{}
-
-// ReplicaSetNamespaceListerExpansion allows custom methods to be added to
-// ReplicaSetNamespaceLister.
-type ReplicaSetNamespaceListerExpansion interface{}
-
-// StatefulSetListerExpansion allows custom methods to be added to
-// StatefulSetLister.
-type StatefulSetListerExpansion interface{}
-
-// StatefulSetNamespaceListerExpansion allows custom methods to be added to
-// StatefulSetNamespaceLister.
-type StatefulSetNamespaceListerExpansion interface{}
@@ -0,0 +1,73 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// ReplicaSetListerExpansion allows custom methods to be added to
// ReplicaSetLister.
type ReplicaSetListerExpansion interface {
	GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error)
}

// ReplicaSetNamespaceListerExpansion allows custom methods to be added to
// ReplicaSetNamespaceLister.
type ReplicaSetNamespaceListerExpansion interface{}

// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod.
// Only the one specified in the Pod's ControllerRef will actually manage it.
// Returns an error only if no matching ReplicaSets are found.
func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) {
	if len(pod.Labels) == 0 {
		return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name)
	}

	list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var rss []*apps.ReplicaSet
	for _, rs := range list {
		if rs.Namespace != pod.Namespace {
			continue
		}
		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid selector: %v", err)
		}

		// If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		rss = append(rss, rs)
	}

	if len(rss) == 0 {
		return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}

	return rss, nil
}
@@ -0,0 +1,77 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// StatefulSetListerExpansion allows custom methods to be added to
// StatefulSetLister.
type StatefulSetListerExpansion interface {
	GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error)
}

// StatefulSetNamespaceListerExpansion allows custom methods to be added to
// StatefulSetNamespaceLister.
type StatefulSetNamespaceListerExpansion interface{}

// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod.
// Only the one specified in the Pod's ControllerRef will actually manage it.
// Returns an error only if no matching StatefulSets are found.
func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) {
	var selector labels.Selector
	var ps *apps.StatefulSet

	if len(pod.Labels) == 0 {
		return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name)
	}

	list, err := s.StatefulSets(pod.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var psList []*apps.StatefulSet
	for i := range list {
		ps = list[i]
		if ps.Namespace != pod.Namespace {
			continue
		}
		selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid selector: %v", err)
		}

		// If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) {
			continue
		}
		psList = append(psList, ps)
	}

	if len(psList) == 0 {
		return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
	}

	return psList, nil
}
@@ -200,7 +200,7 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
 		MemoryConstraint: 100 * (1024 * 1024),
 	}
 	constraints["l7-lb-controller"] = framework.ResourceConstraint{
-		CPUConstraint:    0.2 + 0.0001*float64(numNodes),
+		CPUConstraint:    0.2 + 0.00015*float64(numNodes),
 		MemoryConstraint: (75 + uint64(math.Ceil(0.8*float64(numNodes)))) * (1024 * 1024),
 	}
 	constraints["influxdb"] = framework.ResourceConstraint{
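For a sense of scale, a tiny worked check of the relaxed l7-lb-controller CPU constraint; the 5000-node figure is just an illustrative assumption.

package main

import "fmt"

func main() {
	numNodes := 5000.0
	oldLimit := 0.2 + 0.0001*numNodes  // 0.70 CPU cores
	newLimit := 0.2 + 0.00015*numNodes // 0.95 CPU cores
	fmt.Printf("l7-lb-controller CPU constraint at %.0f nodes: old=%.2f new=%.2f\n", numNodes, oldLimit, newLimit)
}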