Merge branch 'master' into fix_shellcheck_failure_make-rules
 42   test/OWNERS
@@ -6,31 +6,20 @@ reviewers:
- MrHohn
- deads2k
- enisoc
- enj # for test/integration/etcd/etcd_storage_path_test.go
- erictune
- foxish # for test/e2e/network-partition.go
- gmarek
- janetkuo
- kow3ns # for test/e2e/statefulset.go
- krousey
- liggitt
- madhusudancs
- marun
- mikedanese
- msau42 # for test/e2e/commmon/{*volume,empty_dir}.go and test/e2e/framework/{pv|volume}_util.go
- msau42
- ncdc
- pwittrock # for test/e2e/kubectl.go
- saad-ali
- shyamjvs
- smarterclayton
- soltysh
- sig-testing-reviewers
- sttts
- timothysc
- zmerlynn
- vishh
- MaciekPytel # for test/e2e/common/autoscaling_utils.go
- oomichi
- xichengliudui
- andrewsykim
- neolit123
@@ -44,31 +33,34 @@ approvers:
- MrHohn
- deads2k
- enisoc
- enj # for test/integration/etcd/etcd_storage_path_test.go
- eparis
- erictune
- foxish # for test/e2e/network-partition.go
- gmarek
- janetkuo
- kow3ns # for test/e2e/statefulset.go
- krousey
- liggitt
- madhusudancs
- marun
- mikedanese
- msau42 # for test/e2e/commmon/{*volume,empty_dir}.go and test/e2e/framework/{pv|volume}_util.go
- msau42
- ncdc
- oomichi
- pwittrock # for test/e2e/kubectl.go
- saad-ali
- shyamjvs
- sig-testing-approvers
- smarterclayton
- soltysh
- sttts
- timothysc
- zmerlynn
- vishh
- MaciekPytel # for test/e2e/common/autoscaling_utils.go
emeritus_approvers:
- enj
- eparis
- erictune
- foxish
- gmarek
- krousey
- kow3ns
- madhusudancs
- marun
- soltysh
- zmerlynn
- MaciekPytel

labels:
- area/test
- sig/testing

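The script hunks that follow make the same ShellCheck-driven change over and over: bare expansions such as "$id_field" and "${kube_flags[@]}" gain a :? guard ("${id_field:?}", "${kube_flags[@]:?}"), so an unset or empty variable aborts the run with an error instead of silently expanding to nothing. This applies even inside the go-template strings passed to kube::test::get_object_assert, where "$id_field" is expanded by bash before the template is evaluated. A minimal sketch of the guard, with invented variable names rather than anything from the test suite:

    #!/usr/bin/env bash
    # demo_flags stands in for arrays like kube_flags; illustration only.
    demo_flags=()                                    # imagine it was never populated
    echo "bare:    ${demo_flags[*]}"                 # expands to an empty string, the run continues
    echo "guarded: ${demo_flags[*]:?demo_flags is unset or empty}"   # prints an error and exits non-zero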
@@ -27,75 +27,75 @@ run_kubectl_apply_tests() {
kube::log::status "Testing kubectl apply"
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
kubectl delete pods test-pod "${kube_flags[@]:?}"

## kubectl apply should be able to clear defaulted fields.
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}"
kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]:?}"
# Post-Condition: deployment "test-deployment-retainkeys" created
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}{{end}}" 'test-deployment-retainkeys'
# Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
grep -q maxSurge <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
grep -q maxUnavailable <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
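The block just above also shows the second recurring rewrite: checks of the form [[ "$(... | grep pattern)" ]], which ShellCheck flags (SC2143), become grep -q pattern <<< "$(...)", so the test rides on grep's exit status instead of on whether it printed anything. A rough before/after sketch with an invented deployment name:

    # before: the [[ ]] test keys off whether grep produced any output
    [[ "$(kubectl get deployment demo -o yaml | grep maxSurge)" ]] && echo "field present"
    # after: -q suppresses output, the exit status alone decides, and the
    # kubectl output is fed through a here-string instead of a pipe
    grep -q maxSurge <<< "$(kubectl get deployment demo -o yaml)" && echo "field present"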
# Command: apply a deployment "test-deployment-retainkeys" should clear
# defaulted fields and successfully update the deployment
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]]
# Post-Condition: deployment "test-deployment-retainkeys" has updated fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
grep -q Recreate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
grep -q hostPath <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
# Clean up
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}"
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}"

## kubectl apply -f with label selector should only apply matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# apply
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]:?}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field:?}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod

## kubectl apply --server-dry-run
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

# apply dry-run
kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}"
kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# No pod exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# apply non dry-run creates the pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# apply changes
kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
# Post-Condition: label still has initial value
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'

# clean-up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"

## kubectl apply dry-run on CR
# Create CRD
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
@@ -117,31 +117,31 @@ run_kubectl_apply_tests() {
|
||||
__EOF__
|
||||
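One pattern from the hunk above worth spelling out: expected failures are captured as output_message=$(! kubectl ... 2>&1 "${kube_flags[@]:?}"). The leading ! inverts the exit status inside the command substitution, so a kubectl call that is supposed to fail still leaves the assignment successful (which matters while errexit is on), and 2>&1 folds the error text into the captured message for the later if_has_string assertion. A small sketch, not taken from the suite:

    # capture the error text of a command that is expected to fail;
    # the ! inversion keeps the assignment's exit status at zero
    msg=$(! kubectl get pods does-not-exist 2>&1)
    grep -q "not found" <<< "${msg}" && echo "got the expected error"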
|
||||
# Dry-run create the CR
|
||||
kubectl "${kube_flags[@]}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
|
||||
kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
|
||||
# Make sure that the CR doesn't exist
|
||||
! kubectl "${kube_flags[@]}" get resource/myobj
|
||||
! kubectl "${kube_flags[@]:?}" get resource/myobj
|
||||
|
||||
# clean-up
|
||||
kubectl "${kube_flags[@]}" delete customresourcedefinition resources.mygroup.example.com
|
||||
kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
|
||||
|
||||
## kubectl apply --prune
|
||||
# Pre-Condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
|
||||
# apply a
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
|
||||
# check wrong pod doesn't exist
|
||||
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'pods "b" not found'
|
||||
|
||||
# apply b
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}"
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
|
||||
kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b'
|
||||
# check wrong pod doesn't exist
|
||||
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'pods "a" not found'
|
||||
|
||||
# cleanup
|
||||
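For context on what the surrounding prune tests assert: kubectl apply --prune deletes objects that were previously applied under the same label selector but no longer appear in the supplied manifests, and pruning without any selector has to be opted into with --all. A hedged usage sketch (the paths are invented):

    # prune within the prune-group=true label: a previously applied pod that is
    # missing from a.yaml gets deleted along with the apply
    kubectl apply --prune -l prune-group=true -f ./manifests/a.yaml
    # pruning a whole directory with no selector must be made explicit
    kubectl apply --prune --all -f ./manifests/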
@@ -149,79 +149,79 @@ __EOF__
|
||||
|
||||
# same thing without prune for a sanity check
|
||||
# Pre-Condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
|
||||
# apply a
|
||||
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
|
||||
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
|
||||
# check wrong pod doesn't exist
|
||||
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'pods "b" not found'
|
||||
|
||||
# apply b
|
||||
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
|
||||
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}"
|
||||
# check both pods exist
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
|
||||
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
|
||||
kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b'
|
||||
# check wrong pod doesn't exist
|
||||
|
||||
# cleanup
|
||||
kubectl delete pod/a pod/b
|
||||
|
||||
## kubectl apply --prune requires a --all flag to select everything
|
||||
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" \
|
||||
'all resources selected for prune without explicitly passing --all'
|
||||
# should apply everything
|
||||
kubectl apply --all --prune -f hack/testdata/prune
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
|
||||
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
|
||||
kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b'
|
||||
kubectl delete pod/a pod/b
|
||||
|
||||
## kubectl apply --prune should fallback to delete for non reapable types
|
||||
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
|
||||
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
|
||||
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field:?}}}" 'a-pvc'
|
||||
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field:?}}}" 'b-pvc'
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]:?}"
|
||||
|
||||
## kubectl apply --prune --prune-whitelist
|
||||
# Pre-Condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# apply pod a
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
|
||||
# apply svc and don't prune pod a by overwriting whitelist
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc'
|
||||
kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
|
||||
# apply svc and prune pod a with default whitelist
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc'
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# cleanup
|
||||
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
|
||||
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]:?}"
|
||||
|
||||
|
||||
## kubectl apply -f some.yml --force
|
||||
# Pre-condition: no service exists
|
||||
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert services "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# apply service a
|
||||
kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]}"
|
||||
kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]:?}"
|
||||
# check right service exists
|
||||
kube::test::get_object_assert 'services a' "{{${id_field}}}" 'a'
|
||||
kube::test::get_object_assert 'services a' "{{${id_field:?}}}" 'a'
|
||||
# change immutable field and apply service a
|
||||
output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'field is immutable'
|
||||
# apply --force to recreate resources for immutable fields
|
||||
kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]}"
|
||||
kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]:?}"
|
||||
# check immutable field exists
|
||||
kube::test::get_object_assert 'services a' "{{.spec.clusterIP}}" '10.0.0.12'
|
||||
# cleanup
|
||||
kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]}"
|
||||
kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]:?}"
|
||||
|
||||
## kubectl apply -k somedir
|
||||
kubectl apply -k hack/testdata/kustomize
|
||||
@@ -252,31 +252,31 @@ run_kubectl_apply_tests() {
|
||||
kube::log::status "Testing kubectl apply --experimental-server-side"
|
||||
## kubectl apply should create the resource that doesn't exist yet
|
||||
# Pre-Condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
|
||||
kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
|
||||
# Post-Condition: pod "test-pod" is created
|
||||
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
|
||||
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
|
||||
# Clean up
|
||||
kubectl delete pods test-pod "${kube_flags[@]}"
|
||||
kubectl delete pods test-pod "${kube_flags[@]:?}"
|
||||
|
||||
## kubectl apply --server-dry-run
|
||||
# Pre-Condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
|
||||
# apply dry-run
|
||||
kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
|
||||
# No pod exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# apply non dry-run creates the pod
|
||||
kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
|
||||
# apply changes
|
||||
kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
|
||||
kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
|
||||
# Post-Condition: label still has initial value
|
||||
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
|
||||
kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
|
||||
|
||||
# clean-up
|
||||
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
|
||||
|
||||
## kubectl apply dry-run on CR
|
||||
# Create CRD
|
||||
@@ -302,12 +302,12 @@ run_kubectl_apply_tests() {
|
||||
__EOF__
|
||||
|
||||
# Dry-run create the CR
|
||||
kubectl "${kube_flags[@]}" apply --experimental-server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
|
||||
kubectl "${kube_flags[@]:?}" apply --experimental-server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
|
||||
# Make sure that the CR doesn't exist
|
||||
! kubectl "${kube_flags[@]}" get resource/myobj
|
||||
! kubectl "${kube_flags[@]:?}" get resource/myobj
|
||||
|
||||
# clean-up
|
||||
kubectl "${kube_flags[@]}" delete customresourcedefinition resources.mygroup.example.com
|
||||
kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
|
||||
500   test/cmd/apps.sh
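Besides the :? guards, the daemonset hunk below quotes the *=image argument to kubectl set image. Unquoted, the leading * is a shell glob: when nothing matches, bash passes the pattern through literally, but an accidental match would silently rewrite the argument, which is why ShellCheck wants it quoted. A sketch with a placeholder image tag:

    # fragile: * is subject to pathname expansion before kubectl ever sees it
    kubectl set image daemonsets/bind *=k8s.gcr.io/pause:demo
    # robust: the literal "*=image" string reaches kubectl unchanged
    kubectl set image daemonsets/bind "*=k8s.gcr.io/pause:demo"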
@@ -27,24 +27,28 @@ run_daemonset_tests() {
|
||||
|
||||
### Create a rolling update DaemonSet
|
||||
# Pre-condition: no DaemonSet exists
|
||||
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
|
||||
# Template Generation should be 1
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1'
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
|
||||
# Template Generation should stay 1
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1'
|
||||
# Test set commands
|
||||
kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
|
||||
kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
|
||||
kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'
|
||||
kubectl set image daemonsets/bind "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd"
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '2'
|
||||
kubectl set env daemonsets/bind "${kube_flags[@]:?}" foo=bar
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '3'
|
||||
kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '4'
|
||||
|
||||
# Rollout restart should change generation
|
||||
kubectl rollout restart daemonset/bind "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '5'
|
||||
|
||||
# Clean up
|
||||
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
|
||||
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
@@ -59,42 +63,42 @@ run_daemonset_history_tests() {
|
||||
|
||||
### Test rolling back a DaemonSet
|
||||
# Pre-condition: no DaemonSet or its pods exists
|
||||
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Command
|
||||
# Create a DaemonSet (revision 1)
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
|
||||
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]:?}"
|
||||
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
|
||||
# Rollback to revision 1 - should be no-op
|
||||
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
|
||||
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
|
||||
# Update the DaemonSet (revision 2)
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
|
||||
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]:?}"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
|
||||
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
|
||||
# Rollback to revision 1 with dry-run - should be no-op
|
||||
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
|
||||
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
|
||||
# Rollback to revision 1
|
||||
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
|
||||
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
|
||||
# Rollback to revision 1000000 - should fail
|
||||
output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
|
||||
output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" "unable to find specified revision"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
|
||||
# Rollback to last revision
|
||||
kubectl rollout undo daemonset "${kube_flags[@]}"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
|
||||
kubectl rollout undo daemonset "${kube_flags[@]:?}"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
|
||||
kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
|
||||
kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
|
||||
# Clean up
|
||||
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
|
||||
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
@@ -108,20 +112,20 @@ run_kubectl_apply_deployments_tests() {
|
||||
kube::log::status "Testing kubectl apply deployments"
|
||||
## kubectl apply should propagate user defined null values
|
||||
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
|
||||
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
kube::test::get_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# apply base deployment
|
||||
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
|
||||
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]:?}"
|
||||
# check right deployment exists
|
||||
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
|
||||
kube::test::get_object_assert 'deployments my-depl' "{{${id_field:?}}}" 'my-depl'
|
||||
# check right labels exists
|
||||
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
|
||||
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
|
||||
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
|
||||
|
||||
# apply new deployment with new template labels
|
||||
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
|
||||
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]:?}"
|
||||
# check right labels exists
|
||||
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
|
||||
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
|
||||
@@ -134,24 +138,24 @@ run_kubectl_apply_deployments_tests() {
|
||||
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
|
||||
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
|
||||
# Post-Condition: no Deployments, ReplicaSets, Pods exist
|
||||
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::wait_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
kube::test::wait_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
|
||||
# kubectl apply deployment --overwrite=true --force=true
|
||||
# Pre-Condition: no deployment exists
|
||||
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# apply deployment nginx
|
||||
kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
|
||||
kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]:?}"
|
||||
# check right deployment exists
|
||||
kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
|
||||
kube::test::get_object_assert 'deployment nginx' "{{${id_field:?}}}" 'nginx'
|
||||
# apply deployment with new labels and a conflicting resourceVersion
|
||||
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
|
||||
# apply deployment with --force and --overwrite will succeed
|
||||
kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
|
||||
# check the changed deployment
|
||||
output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2)
|
||||
output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]:?}" |grep nginx2)
|
||||
kube::test::if_has_string "${output_message}" '"name": "nginx2"'
|
||||
# applying a resource (with --force) that is both conflicting and invalid will
|
||||
# cause the server to only return a "Conflict" error when we attempt to patch.
|
||||
@@ -161,10 +165,10 @@ run_kubectl_apply_deployments_tests() {
|
||||
# invalid, we will receive an invalid error when we attempt to create it, after
|
||||
# having deleted the old resource. Ensure that when this case is reached, the
|
||||
# old resource is restored once again, and the validation error is printed.
|
||||
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'Invalid value'
|
||||
# Ensure that the old object has been restored
|
||||
kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
|
||||
kube::test::get_object_assert 'deployment nginx' "{{${template_labels:?}}}" 'nginx2'
|
||||
# cleanup
|
||||
kubectl delete deployments --all --grace-period=10
|
||||
|
||||
@@ -181,140 +185,140 @@ run_deployment_tests() {
|
||||
# Test kubectl create deployment (using default - old generator)
|
||||
kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
|
||||
# Post-Condition: Deployment "nginx" is created.
|
||||
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
|
||||
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{${container_name_field:?}}}" 'nginx'
|
||||
# and old generator was used, iow. old defaults are applied
|
||||
output_message=$(kubectl get deployment.apps/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
|
||||
kube::test::if_has_not_string "${output_message}" '2'
|
||||
# Ensure we can interact with deployments through extensions and apps endpoints
|
||||
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
|
||||
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'apps/v1'
|
||||
# Clean up
|
||||
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
|
||||
kubectl delete deployment test-nginx-extensions "${kube_flags[@]:?}"
|
||||
|
||||
# Test kubectl create deployment
|
||||
kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
|
||||
# Post-Condition: Deployment "nginx" is created.
|
||||
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
|
||||
kube::test::get_object_assert 'deploy test-nginx-apps' "{{${container_name_field:?}}}" 'nginx'
|
||||
# and new generator was used, iow. new defaults are applied
|
||||
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
|
||||
kube::test::if_has_string "${output_message}" '2'
|
||||
# Ensure we can interact with deployments through extensions and apps endpoints
|
||||
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
|
||||
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
|
||||
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
|
||||
kube::test::if_has_string "${output_message}" 'apps/v1'
|
||||
# Describe command (resource only) should print detailed information
|
||||
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
|
||||
# Describe command (resource only) should print detailed information
|
||||
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
|
||||
# Clean up
|
||||
kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
|
||||
kubectl delete deployment test-nginx-apps "${kube_flags[@]:?}"
|
||||
|
||||
### Test kubectl create deployment with image and command
|
||||
# Pre-Condition: No deployment exists.
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
|
||||
# Post-Condition: Deployment "nginx" is created.
|
||||
kube::test::get_object_assert 'deploy nginx-with-command' "{{$container_name_field}}" 'nginx'
|
||||
kube::test::get_object_assert 'deploy nginx-with-command' "{{${container_name_field:?}}}" 'nginx'
|
||||
# Clean up
|
||||
kubectl delete deployment nginx-with-command "${kube_flags[@]}"
|
||||
kubectl delete deployment nginx-with-command "${kube_flags[@]:?}"
|
||||
|
||||
### Test kubectl create deployment should not fail validation
|
||||
# Pre-Condition: No deployment exists.
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
|
||||
kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]:?}"
|
||||
# Post-Condition: Deployment "deployment-with-unixuserid" is created.
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'deployment-with-unixuserid:'
|
||||
# Clean up
|
||||
kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"
|
||||
kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]:?}"
|
||||
|
||||
### Test cascading deletion
|
||||
## Test that rs is deleted when deployment is deleted.
|
||||
# Pre-condition: no deployment exists
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Create deployment
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
|
||||
# Wait for rs to come up.
|
||||
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
|
||||
kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '3'
|
||||
# Deleting the deployment should delete the rs.
|
||||
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
|
||||
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"
|
||||
kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
|
||||
## Test that rs is not deleted when deployment is deleted with cascade set to false.
|
||||
# Pre-condition: no deployment and rs exist
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Create deployment
|
||||
kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
|
||||
# Wait for rs to come up.
|
||||
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
|
||||
kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
|
||||
# Delete the deployment with cascade set to false.
|
||||
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
|
||||
kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" --cascade=false
|
||||
# Wait for the deployment to be deleted and then verify that rs is not
|
||||
# deleted.
|
||||
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
|
||||
kube::test::wait_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
kube::test::get_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
|
||||
# Cleanup
|
||||
# Find the name of the rs to be deleted.
|
||||
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
|
||||
kubectl delete rs ${output_message} "${kube_flags[@]}"
|
||||
output_message=$(kubectl get rs "${kube_flags[@]:?}" -o template --template="{{range.items}}{{${id_field:?}}}{{end}}")
|
||||
kubectl delete rs "${output_message}" "${kube_flags[@]:?}"
|
||||
|
||||
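The replica-set cleanup just above now quotes both the --template=... argument and the captured name: unquoted expansions such as ${output_message} are word-split and glob-expanded by the shell (ShellCheck SC2086). A toy illustration with a placeholder value:

    name="rs one"                  # pretend the captured value contained whitespace
    printf '<%s>\n' $name          # two arguments: <rs> then <one>
    printf '<%s>\n' "$name"        # one argument:  <rs one>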
### Auto scale deployment
|
||||
# Pre-condition: no deployment exists
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
|
||||
# autoscale 2~3 pods, no CPU utilization specified
|
||||
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
|
||||
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
|
||||
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3
|
||||
kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
|
||||
# Clean up
|
||||
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
|
||||
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
|
||||
kubectl delete deployment.apps nginx-deployment "${kube_flags[@]}"
|
||||
kubectl delete hpa nginx-deployment "${kube_flags[@]:?}"
|
||||
kubectl delete deployment.apps nginx-deployment "${kube_flags[@]:?}"
|
||||
|
||||
### Rollback a deployment
|
||||
# Pre-condition: no deployment exists
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
|
||||
# Command
|
||||
# Create a deployment (revision 1)
|
||||
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx:'
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
# Rollback to revision 1 - should be no-op
|
||||
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
# Update the deployment (revision 2)
|
||||
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
|
||||
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
|
||||
# Rollback to revision 1 with dry-run - should be no-op
|
||||
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
|
||||
kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
|
||||
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]:?}" | grep "test-cmd"
|
||||
kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
|
||||
# Rollback to revision 1
|
||||
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
|
||||
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}"
|
||||
sleep 1
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
# Rollback to revision 1000000 - should be no-op
|
||||
! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
# Rollback to last revision
|
||||
kubectl rollout undo deployment nginx "${kube_flags[@]}"
|
||||
kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
|
||||
sleep 1
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
|
||||
# Pause the deployment
|
||||
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
|
||||
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}"
|
||||
# A paused deployment cannot be rolled back
|
||||
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
|
||||
! kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
|
||||
# A paused deployment cannot be restarted
|
||||
! kubectl rollout restart deployment nginx "${kube_flags[@]}"
|
||||
! kubectl rollout restart deployment nginx "${kube_flags[@]:?}"
|
||||
# Resume the deployment
|
||||
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
|
||||
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}"
|
||||
# The resumed deployment can now be rolled back
|
||||
kubectl rollout undo deployment nginx "${kube_flags[@]}"
|
||||
kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
|
||||
# Check that the new replica set has all old revisions stored in an annotation
|
||||
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
|
||||
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
|
||||
@@ -326,84 +330,84 @@ run_deployment_tests() {
|
||||
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
|
||||
rs="$(kubectl get rs "${newrs}" -o yaml)"
|
||||
kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\""
|
||||
cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
|
||||
${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}"
|
||||
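One more ShellCheck cleanup appears just above: the cat file | ${SED} ... pipeline loses its cat (SC2002, useless use of cat), since sed can read the file itself while the result still streams into kubectl create -f -. A minimal sketch with an invented manifest name:

    # before: cat only forwards the file to sed
    cat demo-deployment.yaml | sed "s/name: nginx$/name: nginx2/" | kubectl create -f -
    # after: sed reads the file directly; the same stream reaches kubectl
    sed "s/name: nginx$/name: nginx2/" demo-deployment.yaml | kubectl create -f -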
# Deletion of both deployments should not be blocked
|
||||
kubectl delete deployment nginx2 "${kube_flags[@]}"
|
||||
kubectl delete deployment nginx2 "${kube_flags[@]:?}"
|
||||
# Clean up
|
||||
kubectl delete deployment nginx "${kube_flags[@]}"
|
||||
kubectl delete deployment nginx "${kube_flags[@]:?}"
|
||||
|
||||
### Set image of a deployment
|
||||
# Pre-condition: no deployment exists
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set non-existing container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set image of all containers of the deployment
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Set image of all containers of the deployment again when the image does not change
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"

### Set env of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}"
kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]:?}"
kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
# configmap is special here because the controller automatically creates kube-root-ca.crt in each namespace
kube::test::get_object_assert 'configmaps/test-set-env-config' "{{$id_field}}" 'test-set-env-config'
kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
kube::test::get_object_assert 'configmaps/test-set-env-config' "{{${id_field:?}}}" 'test-set-env-config'
kube::test::get_object_assert secret "{{range.items}}{{${id_field:?}}}:{{end}}" 'test-set-env-secret:'
# Set env of deployments by configmap from keys
kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}"
kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]:?}"
# Assert correct value in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
# Assert single value in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
# Set env of deployments by configmap
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]:?}"
# Assert all values in deployment env
kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2'
# Set env of deployments for all containers
kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]:?}"
# Set env of deployments for specific container
kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}"
kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]:?}"
# Set env of deployments by secret from keys
kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}"
kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]:?}"
# Set env of deployments by secret
kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]:?}"
# Remove specific env of deployment
kubectl set env deployment nginx-deployment env-
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete configmap test-set-env-config "${kube_flags[@]}"
kubectl delete secret test-set-env-secret "${kube_flags[@]}"
kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"
kubectl delete configmap test-set-env-config "${kube_flags[@]:?}"
kubectl delete secret test-set-env-secret "${kube_flags[@]:?}"

set +o nounset
set +o errexit
@@ -418,42 +422,42 @@ run_statefulset_history_tests() {

### Test rolling back a StatefulSet
# Pre-condition: no statefulset or its pods exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
# Create a StatefulSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]:?}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}"
kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
# Update the statefulset (revision 2)
kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]:?}"
kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]:?}"
kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}"
kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo statefulset "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kubectl rollout undo statefulset "${kube_flags[@]:?}"
kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
# Clean up - delete newest configuration
kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]:?}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""

@@ -470,26 +474,30 @@ run_stateful_set_tests() {

### Create and stop statefulset, make sure it doesn't leak pods
# Pre-condition: no statefulset exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command: create statefulset
kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"

### Scale statefulset test with current-replicas and replicas
# Pre-condition: 0 replicas
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '0'
kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '1'
# Command: Scale up
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]:?}"
# Post-condition: 1 replica, named nginx-0
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '1'
kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '2'
# Typically we'd wait and confirm that N>1 replicas are up, but this framework
# doesn't start the scheduler, so pet-0 will block all others.
# TODO: test robust scaling in an e2e.
wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"

# Rollout restart should change generation
kubectl rollout restart statefulset nginx "${kube_flags[@]}"
kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '3'

### Clean up
kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""

@@ -507,40 +515,40 @@ run_rs_tests() {

### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]:?}"
# Post-condition: no pods from frontend replica set
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${id_field:?}}}:{{end}}" ''

### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
#TODO(mortent): Remove this workaround when ReplicaSet bug described in issue #69376 is fixed
local replicaset_name="frontend-no-cascade"
sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]}" -f -
sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name:?}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]:?}" -f -
# wait for all 3 pods to be set up
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::log::status "Deleting rs"
kubectl delete rs "${replicaset_name}" "${kube_flags[@]}" --cascade=false
kubectl delete rs "${replicaset_name}" "${kube_flags[@]:?}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl delete pods -l "tier=frontend" "${kube_flags[@]:?}"
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
@@ -562,16 +570,16 @@ run_rs_tests() {

### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]:?}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '2'

# Set up three deployments; two of them have the same label
kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]:?}"
kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]:?}"
kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
@@ -586,78 +594,78 @@ run_rs_tests() {
kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]:?}"
kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]:?}"

### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
kubectl expose rs frontend --port=80 "${kube_flags[@]:?}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service frontend' "{{${port_name:?}}} {{${port_field:?}}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]:?}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
kube::test::get_object_assert 'service frontend-2' "{{${port_name:?}}} {{${port_field:?}}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
kubectl delete service frontend{,-2} "${kube_flags[@]:?}"

# Test set commands
# Pre-condition: frontend replica set exists at generation 1
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '1'
kubectl set image rs/frontend "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd"
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]:?}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
kubectl set resources rs/frontend "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'

### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]:?}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''

### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]:?}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'

### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
kubectl delete rs frontend redis-slave "${kube_flags[@]:?}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''

if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
if kube::test::if_supports_resource "${horizontalpodautoscalers:?}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]:?}"
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]:?}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
! kubectl autoscale rs frontend "${kube_flags[@]:?}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]:?}"
fi

set +o nounset

@@ -59,21 +59,21 @@ run_impersonation_tests() {

kube::log::status "Testing impersonation"

output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1)
output_message=$(! kubectl get pods "${kube_flags_with_token[@]:?}" --as-group=foo 2>&1)
kube::test::if_has_string "${output_message}" 'without impersonating a user'

if kube::test::if_supports_resource "${csr}" ; then
if kube::test::if_supports_resource "${csr:?}" ; then
# --as
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" --as=user1
kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated'
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}"

# --as-group
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3'
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon '
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}"
fi

set +o nounset

1
test/conformance/testdata/OWNERS
vendored
1
test/conformance/testdata/OWNERS
vendored
@@ -9,6 +9,7 @@ reviewers:
- spiffxp
- timothysc
- dims
- johnbelamaric
approvers:
- bgrant0607
- smarterclayton

@@ -81,6 +81,7 @@ go_library(
"//test/e2e/apps:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/utils:go_default_library",

@@ -41,6 +41,7 @@ import (
rbacv1beta1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
"k8s.io/utils/pointer"
@@ -373,16 +374,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
}, "Waited %s for the sample-apiserver to be ready to handle requests.")
if err != nil {
currentAPIServiceJSON, _ := json.Marshal(currentAPIService)
framework.Logf("current APIService: %s", string(currentAPIServiceJSON))
e2elog.Logf("current APIService: %s", string(currentAPIServiceJSON))

currentPodsJSON, _ := json.Marshal(currentPods)
framework.Logf("current pods: %s", string(currentPodsJSON))
e2elog.Logf("current pods: %s", string(currentPodsJSON))

if currentPods != nil {
for _, pod := range currentPods.Items {
for _, container := range pod.Spec.Containers {
logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name)
framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
e2elog.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
}
}
}
@@ -485,12 +486,12 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
}

// pollTimed will call Poll but time how long Poll actually took.
// It will then framework.logf the msg with the duration of the Poll.
// It will then e2elog.Logf the msg with the duration of the Poll.
// It is assumed that msg will contain one %s for the elapsed time.
func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
defer func(start time.Time, msg string) {
elapsed := time.Since(start)
framework.Logf(msg, elapsed)
e2elog.Logf(msg, elapsed)
}(time.Now(), msg)
return wait.Poll(interval, timeout, condition)
}

@@ -33,6 +33,7 @@ import (
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

const numberOfTotalResources = 400
@@ -62,7 +63,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
if err == nil {
return
}
framework.Logf("Got an error creating template %d: %v", i, err)
e2elog.Logf("Got an error creating template %d: %v", i, err)
}
ginkgo.Fail("Unable to create template %d, exiting", i)
})
@@ -81,7 +82,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
list, err := client.List(opts)
gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))

if len(lastRV) == 0 {
@@ -122,7 +123,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
firstToken := list.Continue
firstRV := list.ResourceVersion
gomega.Expect(int(list.RemainingItemCount) + len(list.Items)).To(gomega.BeNumerically("==", numberOfTotalResources))
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)

ginkgo.By("retrieving the second page until the token expires")
opts.Continue = firstToken
@@ -130,13 +131,13 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
_, err := client.List(opts)
if err == nil {
framework.Logf("Token %s has not expired yet", firstToken)
e2elog.Logf("Token %s has not expired yet", firstToken)
return false, nil
}
if err != nil && !errors.IsResourceExpired(err) {
return false, err
}
framework.Logf("got error %s", err)
e2elog.Logf("got error %s", err)
status, ok := err.(errors.APIStatus)
if !ok {
return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
@@ -145,7 +146,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
if len(inconsistentToken) == 0 {
return false, fmt.Errorf("expect non empty continue token")
}
framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
e2elog.Logf("Retrieved inconsistent continue %s", inconsistentToken)
return true, nil
})

@@ -169,7 +170,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
list, err := client.List(opts)
gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
gomega.Expect(int(list.RemainingItemCount) + len(list.Items) + found).To(gomega.BeNumerically("==", numberOfTotalResources))
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
gomega.Expect(list.ResourceVersion).To(gomega.Equal(lastRV))
for _, item := range list.Items {

@@ -33,6 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
@@ -169,7 +170,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
},
})
if err != nil && errors.IsAlreadyExists(err) {
framework.Logf("role binding %s already exists", roleBindingCRDName)
e2elog.Logf("role binding %s already exists", roleBindingCRDName)
} else {
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
}

@@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
@@ -80,35 +79,35 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)

watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)
framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameA)

watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)
framework.ExpectNoError(err, "failed to watch custom resource: %s", watchCRNameB)

testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)

ginkgo.By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrA)
expectEvent(watchA, watch.Added, testCrA)
expectNoEvent(watchB, watch.Added, testCrA)

ginkgo.By("Creating second CR")
testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
framework.ExpectNoError(err, "failed to instantiate custom resource: %+v", testCrB)
expectEvent(watchB, watch.Added, testCrB)
expectNoEvent(watchA, watch.Added, testCrB)

ginkgo.By("Deleting first CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameA)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete custom resource: %s", watchCRNameA)
framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameA)
expectEvent(watchA, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)

ginkgo.By("Deleting second CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameB)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB)
framework.ExpectNoError(err, "failed to delete custom resource: %s", watchCRNameB)
expectEvent(watchB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
})

@@ -40,8 +40,6 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {

framework.SkipUnlessServerVersionGTE(crdVersion, f.ClientSet.Discovery())

config, err := framework.LoadConfig()
if err != nil {
framework.Failf("failed to load config: %v", err)

@@ -25,12 +25,12 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/apps"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
@@ -45,13 +45,14 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")

gomega.Expect(framework.RunRC(testutils.RCConfig{
err := framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: "baz",
Namespace: f.Namespace.Name,
Image: imageutils.GetPauseImageName(),
Replicas: 1,
})).NotTo(gomega.HaveOccurred())
})
framework.ExpectNoError(err)
})

ginkgo.It("should recover from network partition with master", func() {
@@ -97,7 +98,7 @@ func doEtcdFailure(failCommand, fixCommand string) {
func masterExec(cmd string) {
host := framework.GetMasterHost() + ":22"
result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
if result.Code != 0 {
e2essh.LogResult(result)
framework.Failf("master exec command returned non-zero")
@@ -114,7 +115,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
if err != nil {
framework.Logf("apiserver returned error, as expected before recovery: %v", err)
e2elog.Logf("apiserver returned error, as expected before recovery: %v", err)
return false, nil
}
if len(pods.Items) == 0 {
@@ -122,9 +123,9 @@ func checkExistingRCRecovers(f *framework.Framework) {
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
}
framework.Logf("apiserver has recovered")
e2elog.Logf("apiserver has recovered")
return true, nil
}))

@@ -132,7 +133,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
return true, nil

@@ -38,10 +38,10 @@ import (
"k8s.io/apiserver/pkg/storage/names"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -244,14 +244,14 @@ func gatherMetrics(f *framework.Framework) {
var summary framework.TestDataSummary
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, false, false, true, false, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
e2elog.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
} else {
received, err := grabber.Grab()
if err != nil {
framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
e2elog.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
} else {
summary = (*framework.MetricsForE2E)(&received)
framework.Logf(summary.PrintHumanReadable())
e2elog.Logf(summary.PrintHumanReadable())
}
}
}
@@ -653,15 +653,15 @@ var _ = SIGDescribe("Garbage collector", func() {
_, err := rcClient.Get(rc.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(metav1.ListOptions{})
framework.Logf("%d pods remaining", len(pods.Items))
e2elog.Logf("%d pods remaining", len(pods.Items))
count := 0
for _, pod := range pods.Items {
if pod.ObjectMeta.DeletionTimestamp == nil {
count++
}
}
framework.Logf("%d pods has nil DeletionTimestamp", count)
framework.Logf("")
e2elog.Logf("%d pods has nil DeletionTimestamp", count)
e2elog.Logf("")
return false, nil
}
if errors.IsNotFound(err) {
@@ -673,10 +673,10 @@ var _ = SIGDescribe("Garbage collector", func() {
if err2 != nil {
framework.Failf("%v", err2)
}
framework.Logf("%d remaining pods are:", len(pods.Items))
framework.Logf("The ObjectMeta of the remaining pods are:")
e2elog.Logf("%d remaining pods are:", len(pods.Items))
e2elog.Logf("The ObjectMeta of the remaining pods are:")
for _, pod := range pods.Items {
framework.Logf("%#v", pod.ObjectMeta)
e2elog.Logf("%#v", pod.ObjectMeta)
}
framework.Failf("failed to delete the rc: %v", err)
}
@@ -734,12 +734,12 @@ var _ = SIGDescribe("Garbage collector", func() {
}
ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
pods, err := podClient.List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to list pods in namespace: %s", f.Namespace.Name)
patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
for i := 0; i < halfReplicas; i++ {
pod := pods.Items[i]
_, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
}

ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name))
@@ -755,15 +755,15 @@ var _ = SIGDescribe("Garbage collector", func() {
_, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(metav1.ListOptions{})
framework.Logf("%d pods remaining", len(pods.Items))
e2elog.Logf("%d pods remaining", len(pods.Items))
count := 0
for _, pod := range pods.Items {
if pod.ObjectMeta.DeletionTimestamp == nil {
count++
}
}
framework.Logf("%d pods has nil DeletionTimestamp", count)
framework.Logf("")
e2elog.Logf("%d pods has nil DeletionTimestamp", count)
e2elog.Logf("")
return false, nil
}
if errors.IsNotFound(err) {
@@ -775,10 +775,10 @@ var _ = SIGDescribe("Garbage collector", func() {
if err2 != nil {
framework.Failf("%v", err2)
}
framework.Logf("%d remaining pods are:", len(pods.Items))
framework.Logf("ObjectMeta of remaining pods are:")
e2elog.Logf("%d remaining pods are:", len(pods.Items))
e2elog.Logf("ObjectMeta of remaining pods are:")
for _, pod := range pods.Items {
framework.Logf("%#v", pod.ObjectMeta)
e2elog.Logf("%#v", pod.ObjectMeta)
}
framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
}
@@ -814,36 +814,36 @@ var _ = SIGDescribe("Garbage collector", func() {
pod1Name := "pod1"
pod1 := newGCPod(pod1Name)
pod1, err := podClient.Create(pod1)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
pod2Name := "pod2"
pod2 := newGCPod(pod2Name)
pod2, err = podClient.Create(pod2)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
pod3Name := "pod3"
pod3 := newGCPod(pod3Name)
pod3, err = podClient.Create(pod3)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
// create circular dependency
addRefPatch := func(name string, uid types.UID) []byte {
return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
}
patch1 := addRefPatch(pod3.Name, pod3.UID)
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
e2elog.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
patch2 := addRefPatch(pod1.Name, pod1.UID)
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
e2elog.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
patch3 := addRefPatch(pod2.Name, pod2.UID)
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
e2elog.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
// delete one pod, should result in the deletion of all pods
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
var pods *v1.PodList
var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
@@ -858,7 +858,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return false, nil
}); err != nil {
framework.Logf("pods are %#v", pods.Items)
e2elog.Logf("pods are %#v", pods.Items)
framework.Failf("failed to wait for all pods to be deleted: %v", err)
}
})
@@ -909,7 +909,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
framework.Logf("created owner resource %q", ownerName)
e2elog.Logf("created owner resource %q", ownerName)

// Create a custom dependent resource.
dependentName := names.SimpleNameGenerator.GenerateName("dependent")
@@ -934,7 +934,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
framework.Logf("created dependent resource %q", dependentName)
e2elog.Logf("created dependent resource %q", dependentName)

// Delete the owner.
background := metav1.DeletePropagationBackground
@@ -948,8 +948,8 @@ var _ = SIGDescribe("Garbage collector", func() {
_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
return errors.IsNotFound(err), nil
}); err != nil {
framework.Logf("owner: %#v", persistedOwner)
framework.Logf("dependent: %#v", persistedDependent)
e2elog.Logf("owner: %#v", persistedOwner)
e2elog.Logf("dependent: %#v", persistedDependent)
framework.Failf("failed waiting for dependent resource %q to be deleted", dependentName)
}

@@ -1010,7 +1010,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
framework.Logf("created owner resource %q", ownerName)
e2elog.Logf("created owner resource %q", ownerName)

// Create a custom dependent resource.
dependentName := names.SimpleNameGenerator.GenerateName("dependent")
@@ -1035,7 +1035,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
framework.Logf("created dependent resource %q", dependentName)
e2elog.Logf("created dependent resource %q", dependentName)

// Delete the owner and orphan the dependent.
err = resourceClient.Delete(ownerName, getOrphanOptions())
@@ -1073,7 +1073,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("Create the cronjob")
cronJob := newCronJob("simple", "*/1 * * * ?")
cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)

ginkgo.By("Wait for the CronJob to create new Job")
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {

@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
@@ -46,7 +47,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
defer ginkgo.GinkgoRecover()
ns := fmt.Sprintf("nslifetest-%v", n)
_, err = f.CreateNamespace(ns, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", ns)
framework.ExpectNoError(err, "failed to create namespace: %s", ns)
}(n)
}
wg.Wait()
@@ -56,7 +57,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
time.Sleep(time.Duration(10 * time.Second))
deleteFilter := []string{"nslifetest"}
deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter)
framework.ExpectNoError(err, "failed to delete namespace(s) containing: %s", deleteFilter)
gomega.Expect(len(deleted)).To(gomega.Equal(totalNS))

ginkgo.By("Waiting for namespaces to vanish")
@@ -74,7 +75,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
}
}
if cnt > maxAllowedAfterDel {
framework.Logf("Remaining namespaces : %v", cnt)
e2elog.Logf("Remaining namespaces : %v", cnt)
return false, nil
}
return true, nil
@@ -85,11 +86,11 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

ginkgo.By("Creating a pod in the namespace")
podName := "test-pod"
@@ -107,14 +108,14 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name)

ginkgo.By("Waiting for the pod to have running status")
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))

ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)

ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
@@ -129,7 +130,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {

ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
@@ -142,11 +143,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
ginkgo.By("Creating a test namespace")
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
framework.ExpectNoError(err, "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

ginkgo.By("Creating a service in the namespace")
serviceName := "test-service"
@@ -167,11 +168,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name)
framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name)

ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)

ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
@@ -186,7 +187,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {

ginkgo.By("Recreating the namespace")
namespace, err = f.CreateNamespace(namespaceName, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)

ginkgo.By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})

@@ -33,6 +33,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"

@@ -215,7 +216,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)")
requests = v1.ResourceList{}
@@ -227,7 +228,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Ensuring a pod cannot update its resource requirements")
// a pod cannot dynamically update its resource requirements.
@@ -237,7 +238,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
@@ -560,7 +561,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
},
}, resourceClient, testcrd.Crd)
// since we only give one quota, this creation should fail.
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Deleting a custom resource")
err = deleteCustomResource(resourceClient, testcr.GetName())
@@ -1051,7 +1052,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Deleting first pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
@@ -1591,7 +1592,7 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R
// verify that the quota shows the expected used resource values
for k, v := range used {
if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
e2elog.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
return false, nil
}
}

@@ -35,6 +35,7 @@ import (
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/pkg/printers"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -52,15 +53,15 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
c := f.ClientSet

podName := "pod-1"
framework.Logf("Creating pod %s", podName)
e2elog.Logf("Creating pod %s", podName)

_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns)

table := &metav1beta1.Table{}
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
framework.Logf("Table: %#v", table)
framework.ExpectNoError(err, "failed to get pod %s in Table form in namespace: %s", podName, ns)
e2elog.Logf("Table: %#v", table)

gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">", 2))
gomega.Expect(len(table.Rows)).To(gomega.Equal(1))
@@ -71,7 +72,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
out := printTable(table)
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
gomega.Expect(out).To(gomega.MatchRegexp("\npod-1\\s"))
framework.Logf("Table:\n%s", out)
e2elog.Logf("Table:\n%s", out)
})

ginkgo.It("should return chunks of table results for list calls", func() {
@@ -97,7 +98,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
if err == nil {
return
}
framework.Logf("Got an error creating template %d: %v", i, err)
e2elog.Logf("Got an error creating template %d: %v", i, err)
}
ginkgo.Fail("Unable to create template %d, exiting", i)
})
@@ -107,7 +108,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns)
gomega.Expect(len(pagedTable.Rows)).To(gomega.Equal(2))
gomega.Expect(pagedTable.ResourceVersion).ToNot(gomega.Equal(""))
gomega.Expect(pagedTable.SelfLink).ToNot(gomega.Equal(""))
@@ -119,7 +120,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
framework.ExpectNoError(err, "failed to get pod templates in Table form in namespace: %s", ns)
gomega.Expect(len(pagedTable.Rows)).To(gomega.BeNumerically(">", 0))
gomega.Expect(pagedTable.Rows[0].Cells[0]).To(gomega.Equal("template-0002"))
})
@@ -129,8 +130,8 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {

table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get nodes in Table form across all namespaces")
framework.Logf("Table: %#v", table)
framework.ExpectNoError(err, "failed to get nodes in Table form across all namespaces")
e2elog.Logf("Table: %#v", table)

gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">=", 2))
gomega.Expect(len(table.Rows)).To(gomega.BeNumerically(">=", 1))
@@ -141,7 +142,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {

out := printTable(table)
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
framework.Logf("Table:\n%s", out)
e2elog.Logf("Table:\n%s", out)
})

ginkgo.It("should return a 406 for a backend which does not implement metadata", func() {
@@ -167,7 +168,7 @@ func printTable(table *metav1beta1.Table) string {
tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
printer := printers.NewTablePrinter(printers.PrintOptions{})
err := printer.PrintObj(table, tw)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to print table: %+v", table)
framework.ExpectNoError(err, "failed to print table: %+v", table)
tw.Flush()
return buf.String()
}

@@ -27,9 +27,9 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
@@ -57,15 +57,15 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a watch on configmaps with label A")
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)
framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)

ginkgo.By("creating a watch on configmaps with label B")
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)
framework.ExpectNoError(err, "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)

ginkgo.By("creating a watch on configmaps with label A or B")
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
framework.ExpectNoError(err, "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)

testConfigMapA := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -86,7 +86,7 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification")
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
expectEvent(watchA, watch.Added, testConfigMapA)
expectEvent(watchAB, watch.Added, testConfigMapA)
expectNoEvent(watchB, watch.Added, testConfigMapA)
@@ -95,7 +95,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)
@@ -104,28 +104,28 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)

ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)

ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification")
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
expectEvent(watchB, watch.Added, testConfigMapB)
expectEvent(watchAB, watch.Added, testConfigMapB)
expectNoEvent(watchA, watch.Added, testConfigMapB)

ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
expectEvent(watchB, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
@@ -151,27 +151,27 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a new configmap")
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)

ginkgo.By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)

ginkgo.By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)

ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)

ginkgo.By("creating a watch on configmaps from the resource version returned by the first update")
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
framework.ExpectNoError(err, "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)

ginkgo.By("Expecting to observe notifications for all changes to the configmap after the first update")
expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@@ -200,17 +200,17 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a watch on configmaps")
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)

ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("modifying the configmap once")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("closing the watch once it receives two notifications")
expectEvent(testWatchBroken, watch.Added, testConfigMap)
@@ -224,7 +224,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns)

ginkgo.By("creating a new watch on configmaps from the last resource version observed by the first watch")
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
@@ -232,11 +232,11 @@ var _ = SIGDescribe("Watchers", func() {
framework.Failf("Expected last notification to refer to a configmap but got: %v", lastEvent)
}
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)

ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
@@ -265,23 +265,23 @@ var _ = SIGDescribe("Watchers", func() {

ginkgo.By("creating a watch on configmaps with a certain label")
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)
framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)

ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("changing the label value of the configmap")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)

ginkgo.By("Expecting to observe a delete notification for the watched object")
expectEvent(testWatch, watch.Added, testConfigMap)
@@ -292,7 +292,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", configMapName, ns)

ginkgo.By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@@ -301,17 +301,17 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)

ginkgo.By("modifying the configmap a third time")
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "3")
})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns)
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns)

ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)

ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored")
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
@@ -346,7 +346,7 @@ var _ = SIGDescribe("Watchers", func() {
resourceVersion := "0"
for i := 0; i < iterations; i++ {
wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to watch configmaps in the namespace %s", ns)
framework.ExpectNoError(err, "Failed to watch configmaps in the namespace %s", ns)
wcs = append(wcs, wc)
resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion
for _, wc := range wcs[1:] {
@@ -413,7 +413,7 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru
select {
case actual, ok := <-w.ResultChan():
if ok {
framework.Logf("Got : %v %v", actual.Type, actual.Object)
e2elog.Logf("Got : %v %v", actual.Type, actual.Object)
} else {
framework.Failf("Watch closed unexpectedly")
}
@@ -472,18 +472,18 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
case createEvent:
cm.Name = name(i)
_, err := c.CoreV1().ConfigMaps(ns).Create(cm)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create configmap %s in namespace %s", cm.Name, ns)
framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns)
existing = append(existing, i)
i++
case updateEvent:
idx := rand.Intn(len(existing))
cm.Name = name(existing[idx])
_, err := c.CoreV1().ConfigMaps(ns).Update(cm)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to update configmap %s in namespace %s", cm.Name, ns)
framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns)
case deleteEvent:
idx := rand.Intn(len(existing))
err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
existing = append(existing[:idx], existing[idx+1:]...)
default:
framework.Failf("Unsupported event operation: %d", op)

@@ -39,6 +39,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
@@ -267,7 +268,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
},
})
if err != nil && errors.IsAlreadyExists(err) {
framework.Logf("role binding %s already exists", roleBindingName)
e2elog.Logf("role binding %s already exists", roleBindingName)
} else {
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
}
@@ -717,7 +718,7 @@ func testWebhook(f *framework.Framework) {
},
}
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)

ginkgo.By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
toNonCompliantFn := func(cm *v1.ConfigMap) {
@@ -754,7 +755,7 @@ func testWebhook(f *framework.Framework) {
ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
}

func testAttachingPodWebhook(f *framework.Framework) {
@@ -762,9 +763,9 @@ func testAttachingPodWebhook(f *framework.Framework) {
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)

ginkgo.By("'kubectl attach' the pod, should be denied by the webhook")
timer := time.NewTimer(30 * time.Second)
@@ -1344,7 +1345,7 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
}
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
expectedCRData := map[string]interface{}{
"mutation-start": "yes",
"mutation-stage-1": "yes",
@@ -1373,17 +1374,17 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
},
}
_, err := customResourceClient.Create(cr, metav1.CreateOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
framework.ExpectNoError(err, "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)

ginkgo.By("Patching Custom Resource Definition to set v2 as storage")
apiVersionWithV2StoragePatch := fmt.Sprint(`{"spec": {"versions": [{"name": "v1", "storage": false, "served": true},{"name": "v2", "storage": true, "served": true}]}}`)
_, err = testcrd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Patch(testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name)

ginkgo.By("Patching the custom resource while v2 is storage version")
crDummyPatch := fmt.Sprint(`[{ "op": "add", "path": "/dummy", "value": "test" }]`)
_, err = testcrd.DynamicClients["v2"].Patch(crName, types.JSONPatchType, []byte(crDummyPatch), metav1.PatchOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name)
framework.ExpectNoError(err, "failed to patch custom resource %s in namespace: %s", crName, f.Namespace.Name)
}

func registerValidatingWebhookForCRD(f *framework.Framework, context *certContext) func() {

@@ -59,10 +59,12 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/utils:go_default_library",

@@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -90,7 +91,7 @@ var _ = SIGDescribe("CronJob", func() {

ginkgo.By("Ensuring no jobs are scheduled")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Ensuring no job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
@@ -127,7 +128,7 @@ var _ = SIGDescribe("CronJob", func() {

ginkgo.By("Ensuring no more jobs are scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@@ -182,7 +183,7 @@ var _ = SIGDescribe("CronJob", func() {

ginkgo.By("Ensuring no unexpected event has happened")
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)

ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@@ -212,7 +213,7 @@ var _ = SIGDescribe("CronJob", func() {

ginkgo.By("Ensuring job was deleted")
_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())

ginkgo.By("Ensuring the job is not in the cronjob active list")
@@ -423,7 +424,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
if len(aliveJobs) > 1 {
return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
} else if len(aliveJobs) == 0 {
framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
e2elog.Logf("Warning: Found 0 jobs in namespace %v", ns)
return false, nil
}
return aliveJobs[0].Name != previousJobName, nil

@@ -33,6 +33,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -72,7 +73,7 @@ type RestartDaemonConfig struct {
// NewRestartConfig creates a RestartDaemonConfig for the given node and daemon.
func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *RestartDaemonConfig {
if !framework.ProviderIs("gce") {
framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
e2elog.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
}
return &RestartDaemonConfig{
nodeName: nodeName,
@@ -89,7 +90,7 @@ func (r *RestartDaemonConfig) String() string {

// waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
func (r *RestartDaemonConfig) waitUp() {
framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
e2elog.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
healthzCheck := fmt.Sprintf(
"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)

@@ -99,12 +100,12 @@ func (r *RestartDaemonConfig) waitUp() {
if result.Code == 0 {
httpCode, err := strconv.Atoi(result.Stdout)
if err != nil {
framework.Logf("Unable to parse healthz http return code: %v", err)
e2elog.Logf("Unable to parse healthz http return code: %v", err)
} else if httpCode == 200 {
return true, nil
}
}
framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
e2elog.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr)
return false, nil
})
@@ -113,7 +114,7 @@ func (r *RestartDaemonConfig) waitUp() {

// kill sends a SIGTERM to the daemon
func (r *RestartDaemonConfig) kill() {
framework.Logf("Killing %v", r)
e2elog.Logf("Killing %v", r)
_, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider)
framework.ExpectNoError(err)
}
@@ -301,7 +302,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
framework.ExpectNoError(err)
preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
if preRestarts != 0 {
framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
e2elog.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
}
for _, ip := range nodeIPs {
restarter := NewRestartConfig(
@@ -310,7 +311,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
}
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
if postRestarts != preRestarts {
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, e2elog.Logf)
framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
}
})

@@ -37,6 +37,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/controller/daemon"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
@@ -68,27 +69,27 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
ginkgo.AfterEach(func() {
|
||||
// Clean up
|
||||
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to dump DaemonSets")
|
||||
framework.ExpectNoError(err, "unable to dump DaemonSets")
|
||||
if daemonsets != nil && len(daemonsets.Items) > 0 {
|
||||
for _, ds := range daemonsets.Items {
|
||||
ginkgo.By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to be reaped")
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
|
||||
}
|
||||
}
|
||||
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
|
||||
e2elog.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
|
||||
} else {
|
||||
framework.Logf("unable to dump daemonsets: %v", err)
|
||||
e2elog.Logf("unable to dump daemonsets: %v", err)
|
||||
}
|
||||
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
|
||||
e2elog.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
|
||||
} else {
|
||||
framework.Logf("unable to dump pods: %v", err)
|
||||
e2elog.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
err = clearDaemonSetNodeLabels(f.ClientSet)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
f = framework.NewDefaultFramework("daemonsets")
|
||||
@@ -105,12 +106,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
c = f.ClientSet
|
||||
|
||||
updatedNS, err := updateNamespaceAnnotations(c, ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ns = updatedNS.Name
|
||||
|
||||
err = clearDaemonSetNodeLabels(c)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -123,21 +124,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -148,46 +149,46 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.ConformanceIt("should run and stop complex daemon", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node selector", dsName)
|
||||
e2elog.Logf("Creating daemon %q with a node selector", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.NodeSelector = nodeSelector
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
ginkgo.By("Change node label to blue, check that daemon pod is launched.")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
|
||||
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node")
|
||||
framework.ExpectNoError(err, "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
|
||||
nodeSelector[daemonsetColorLabel] = "green"
|
||||
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node")
|
||||
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
framework.ExpectNoError(err, "error removing labels on node")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
|
||||
|
||||
ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
|
||||
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error patching daemon set")
|
||||
framework.ExpectNoError(err, "error patching daemon set")
|
||||
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
|
||||
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
|
||||
@@ -195,7 +196,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.It("should run and stop complex daemon with node affinity", func() {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon %q with a node affinity", dsName)
e2elog.Logf("Creating daemon %q with a node affinity", dsName)
ds := newDaemonSet(dsName, image, complexLabel)
ds.Spec.Template.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
@@ -215,29 +216,29 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
},
}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
framework.ExpectNoError(err, "error waiting for daemon pods to be running on no nodes")

ginkgo.By("Change node label to blue, check that daemon pod is launched.")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
gomega.Expect(len(nodeList.Items)).To(gomega.BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error setting labels on node")
framework.ExpectNoError(err, "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
gomega.Expect(len(daemonSetLabels)).To(gomega.Equal(1))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error removing labels on node")
gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
framework.ExpectNoError(err, "error removing labels on node")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
})

/*
@@ -249,13 +250,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label)
@@ -263,13 +264,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
pod.ResourceVersion = ""
pod.Status.Phase = v1.PodFailed
_, err = c.CoreV1().Pods(ns).UpdateStatus(&pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error failing a daemon pod")
framework.ExpectNoError(err, "error failing a daemon pod")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to revive")
framework.ExpectNoError(err, "error waiting for daemon pod to revive")

ginkgo.By("Wait for the failed daemon pod to be completely deleted.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted")
framework.ExpectNoError(err, "error waiting for the failed daemon pod to be completely deleted")
})

// This test should not be added to conformance. We will consider deprecating OnDelete when the
@@ -277,19 +278,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() {
label := map[string]string{daemonsetNameLabel: dsName}

framework.Logf("Creating simple daemon set %s", dsName)
e2elog.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds)
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -299,19 +300,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods images aren't updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
@@ -326,19 +327,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
label := map[string]string{daemonsetNameLabel: dsName}

framework.Logf("Creating simple daemon set %s", dsName)
e2elog.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -348,26 +349,26 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
// Get the number of nodes, and set the timeout appropriately.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
nodeCount := len(nodes.Items)
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second

ginkgo.By("Check that daemon pods images are updated.")
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
framework.ExpectNoError(err, "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds)
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -383,28 +384,28 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
schedulableNodes := framework.GetReadySchedulableNodesOrDie(c)
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
framework.Logf("Create a RollingUpdate DaemonSet")
e2elog.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Check that daemon pods launch on every node of the cluster")
e2elog.Logf("Check that daemon pods launch on every node of the cluster")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
framework.ExpectNoError(err, "error waiting for daemon pod to start")

framework.Logf("Update the DaemonSet to trigger a rollout")
e2elog.Logf("Update the DaemonSet to trigger a rollout")
// We use a nonexistent image here, so that we make sure it won't finish
newImage := "foo:non-existent"
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Make sure we're in the middle of a rollout
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

pods := listDaemonPods(c, ns, label)
var existingPods, newPods []*v1.Pod
@@ -428,15 +429,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
}
gomega.Expect(len(newPods)).NotTo(gomega.Equal(0))

framework.Logf("Roll back the DaemonSet before rollout is complete")
e2elog.Logf("Roll back the DaemonSet before rollout is complete")
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Make sure DaemonSet rollback is complete")
e2elog.Logf("Make sure DaemonSet rollback is complete")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
pods = listDaemonPods(c, ns, label)
@@ -486,7 +487,7 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
return podList
}
@@ -561,7 +562,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
return true, err
}
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict")
e2elog.Logf("failed to update node due to resource version conflict")
return false, nil
}
return false, err
@@ -579,7 +580,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames
return func() (bool, error) {
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
framework.Logf("could not get the pod list: %v", err)
e2elog.Logf("could not get the pod list: %v", err)
return false, nil
}
pods := podList.Items
@@ -596,17 +597,17 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames
nodesToPodCount[pod.Spec.NodeName]++
}
}
framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
e2elog.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))

// Ensure that exactly 1 pod is running on all nodes in nodeNames.
for _, nodeName := range nodeNames {
if nodesToPodCount[nodeName] != 1 {
framework.Logf("Node %s is running more than one daemon pod", nodeName)
e2elog.Logf("Node %s is running more than one daemon pod", nodeName)
return false, nil
}
}

framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
e2elog.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
// other nodes.
@@ -627,7 +628,7 @@ func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string {
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
if !canScheduleOnNode(node, ds) {
framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
e2elog.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
continue
}
nodeNames = append(nodeNames, node.Name)
@@ -692,12 +693,12 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
}
podImage := pod.Spec.Containers[0].Image
if podImage != image {
framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
e2elog.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
} else {
nodesToUpdatedPodCount[pod.Spec.NodeName]++
}
if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
framework.Logf("Pod %s is not available", pod.Name)
e2elog.Logf("Pod %s is not available", pod.Name)
unavailablePods++
}
}
@@ -736,18 +737,18 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
if len(historyList.Items) == numHistory {
return true, nil
}
framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
e2elog.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
return false, nil
}
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for controllerrevisions to be created")
framework.ExpectNoError(err, "error waiting for controllerrevisions to be created")
}

func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
return historyList
}
@@ -760,7 +761,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
// Every history should have the hash label
gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
match, err := daemon.Match(ds, history)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
if match {
curHistory = history
foundCurHistories++

@@ -40,6 +40,7 @@ import (
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/replicaset"
|
||||
testutil "k8s.io/kubernetes/test/utils"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
@@ -128,49 +129,49 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
func failureTrap(c clientset.Interface, ns string) {
|
||||
deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
|
||||
e2elog.Logf("Could not list Deployments in namespace %q: %v", ns, err)
|
||||
return
|
||||
}
|
||||
for i := range deployments.Items {
|
||||
d := deployments.Items[i]
|
||||
|
||||
framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
|
||||
e2elog.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
|
||||
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
|
||||
e2elog.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
|
||||
return
|
||||
}
|
||||
testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
|
||||
testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, e2elog.Logf)
|
||||
rsList := allOldRSs
|
||||
if newRS != nil {
|
||||
rsList = append(rsList, newRS)
|
||||
}
|
||||
testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
|
||||
testutil.LogPodsOfDeployment(c, &d, rsList, e2elog.Logf)
|
||||
}
|
||||
// We need print all the ReplicaSets if there are no Deployment object created
|
||||
if len(deployments.Items) != 0 {
|
||||
return
|
||||
}
|
||||
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
|
||||
e2elog.Logf("Log out all the ReplicaSets if there is no deployment created")
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
|
||||
e2elog.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
|
||||
return
|
||||
}
|
||||
for _, rs := range rss.Items {
|
||||
framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
|
||||
e2elog.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
|
||||
e2elog.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
|
||||
}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(rs.Namespace).List(options)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
|
||||
e2elog.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
|
||||
continue
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
|
||||
e2elog.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -190,23 +191,24 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
|
||||
|
||||
func stopDeployment(c clientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Deleting deployment %s", deploymentName)
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name))
|
||||
e2elog.Logf("Deleting deployment %s", deploymentName)
|
||||
err = framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
e2elog.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).To(gomega.HaveOccurred())
|
||||
framework.ExpectError(err)
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
e2elog.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
|
||||
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
e2elog.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
var pods *v1.PodList
|
||||
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
|
||||
pods, err = c.CoreV1().Pods(ns).List(options)
|
||||
@@ -230,23 +232,23 @@ func testDeleteDeployment(f *framework.Framework) {
|
||||
deploymentName := "test-new-deployment"
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
framework.Logf("Creating simple deployment %s", deploymentName)
|
||||
e2elog.Logf("Creating simple deployment %s", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(newRS).NotTo(gomega.Equal(nilRs))
|
||||
stopDeployment(c, ns, deploymentName)
|
||||
}
|
||||
@@ -268,35 +270,35 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
annotations[deploymentutil.RevisionAnnotation] = rsRevision
|
||||
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
|
||||
rs.Annotations = annotations
|
||||
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %s", err)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
e2elog.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
e2elog.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// There should be 1 old RS (nginx-controller, which is adopted)
|
||||
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
e2elog.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(len(allOldRSs)).Should(gomega.Equal(1))
|
||||
}
|
||||
|
||||
@@ -306,29 +308,31 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
|
||||
// Create a deployment that brings up redis pods.
|
||||
deploymentName := "test-recreate-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
e2elog.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
e2elog.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
gomega.Expect(e2edeploy.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
e2elog.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
err = e2edeploy.WatchRecreateDeployment(c, deployment)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
|
||||
@@ -345,18 +349,18 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
replicas := int32(1)
|
||||
revisionHistoryLimit := utilpointer.Int32Ptr(0)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-cleanup-deployment"
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
e2elog.Logf("Creating deployment %s", deploymentName)
|
||||
|
||||
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to query for pods: %v", err)
|
||||
framework.ExpectNoError(err, "Failed to query for pods: %v", err)
|
||||
|
||||
options := metav1.ListOptions{
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
@@ -364,7 +368,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
w, err := c.CoreV1().Pods(ns).Watch(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
go func() {
|
||||
// There should be only one pod being created, which is the pod with the redis image.
|
||||
// The old RS shouldn't create new pod when deployment controller adding pod template hash label to its selector.
|
||||
@@ -394,11 +398,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
|
||||
err = e2edeploy.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// testRolloverDeployment tests that deployment supports rollover.
|
||||
@@ -416,14 +420,15 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
rsName := "test-rollover-controller"
|
||||
rsReplicas := int32(1)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Wait for replica set to become ready before adopting it.
|
||||
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
gomega.Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(gomega.HaveOccurred())
|
||||
e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
err = replicaset.WaitForReadyReplicaSet(c, ns, rsName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
@@ -431,7 +436,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
|
||||
MaxUnavailable: intOrStrP(0),
|
||||
@@ -439,57 +444,57 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
}
|
||||
newDeployment.Spec.MinReadySeconds = int32(10)
|
||||
_, err = c.AppsV1().Deployments(ns).Create(newDeployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Verify that the pods were scaled up and down as expected.
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
framework.ExpectNoError(err)
|
||||
e2elog.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
|
||||
err = e2edeploy.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
|
||||
// Check if it's updated to revision 1 correctly
|
||||
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
|
||||
e2elog.Logf("Check revision of new replica set for deployment %q", deploymentName)
|
||||
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Ensure that both replica sets have 1 created replica")
|
||||
e2elog.Logf("Ensure that both replica sets have 1 created replica")
|
||||
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(oldRS, int32(1))
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(newRS, int32(1))
|
||||
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
e2elog.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
|
||||
e2elog.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
|
||||
e2elog.Logf("Wait for revision update of deployment %q to 2", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Make sure deployment %q is complete", deploymentName)
|
||||
e2elog.Logf("Make sure deployment %q is complete", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Ensure that both old replica sets have no replicas")
|
||||
e2elog.Logf("Ensure that both old replica sets have no replicas")
|
||||
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(oldRS, int32(0))
|
||||
// Not really the new replica set anymore but we GET by name so that's fine.
|
||||
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
ensureReplicas(newRS, int32(0))
|
||||
}
|
||||
|
||||
@@ -514,23 +519,23 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := NginxImage
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
e2elog.Logf("Creating deployment %s", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
createAnnotation := map[string]string{"action": "create", "author": "node"}
|
||||
d.Annotations = createAnnotation
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "create"
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 2. Update the deployment to create redis pods.
|
||||
updatedDeploymentImage := RedisImage
|
||||
@@ -541,112 +546,112 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
update.Annotations = updateAnnotation
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "update"
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 3. Update the deploymentRollback to rollback to revision 1
|
||||
revision := int64(1)
|
||||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback := newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// TODO: report RollbackDone in deployment status and check it here
|
||||
|
||||
// Wait for it to be updated to revision 3
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "create", after the rollback
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 4. Update the deploymentRollback to rollback to last revision
|
||||
revision = 0
|
||||
framework.Logf("rolling back deployment %s to last revision", deploymentName)
|
||||
e2elog.Logf("rolling back deployment %s to last revision", deploymentName)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for it to be updated to revision 4
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Current newRS annotation should be "update", after the rollback
|
||||
err = replicaset.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 5. Update the deploymentRollback to rollback to revision 10
|
||||
// Since there's no revision 10 in history, it should stay as revision 4
|
||||
revision = 10
|
||||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// TODO: report RollbackRevisionNotFound in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since there's no revision 10
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// 6. Update the deploymentRollback to rollback to revision 4
|
||||
// Since it's already revision 4, it should be no-op
|
||||
revision = 4
|
||||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = e2edeploy.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// TODO: report RollbackTemplateUnchanged in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since it's already revision 4
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func randomScale(d *apps.Deployment, i int) {
|
||||
switch r := rand.Float32(); {
|
||||
case r < 0.3:
|
||||
framework.Logf("%02d: scaling up", i)
|
||||
e2elog.Logf("%02d: scaling up", i)
|
||||
*(d.Spec.Replicas)++
|
||||
case r < 0.6:
|
||||
if *(d.Spec.Replicas) > 1 {
|
||||
framework.Logf("%02d: scaling down", i)
|
||||
e2elog.Logf("%02d: scaling down", i)
|
||||
*(d.Spec.Replicas)--
|
||||
}
|
||||
}
|
||||
@@ -668,9 +673,9 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
d.Spec.ProgressDeadlineSeconds = &thirty
|
||||
d.Spec.RevisionHistoryLimit = &two
|
||||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
iterations := 20
|
||||
for i := 0; i < iterations; i++ {
|
||||
@@ -681,61 +686,61 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
switch n := rand.Float32(); {
|
||||
case n < 0.2:
|
||||
// trigger a new deployment
|
||||
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
|
||||
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case n < 0.4:
|
||||
// rollback to the previous version
|
||||
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
if update.Annotations == nil {
|
||||
update.Annotations = make(map[string]string)
|
||||
}
|
||||
update.Annotations[apps.DeprecatedRollbackTo] = "0"
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case n < 0.6:
|
||||
// just scaling
|
||||
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: scaling deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case n < 0.8:
|
||||
// toggling the deployment
|
||||
if deployment.Spec.Paused {
|
||||
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: pausing deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = true
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
} else {
|
||||
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
randomScale(update, i)
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}

default:
// arbitrarily delete deployment pods
framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
e2elog.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
opts := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(opts)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
if len(podList.Items) == 0 {
framework.Logf("%02d: no deployment pods to delete", i)
e2elog.Logf("%02d: no deployment pods to delete", i)
continue
}
for p := range podList.Items {
@@ -743,10 +748,10 @@ func testIterativeDeployments(f *framework.Framework) {
continue
}
name := podList.Items[p].Name
framework.Logf("%02d: deleting deployment pod %q", i, name)
e2elog.Logf("%02d: deleting deployment pod %q", i, name)
err := c.CoreV1().Pods(ns).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}
}
@@ -754,21 +759,24 @@ func testIterativeDeployments(f *framework.Framework) {

// unpause the deployment if we end up pausing it
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
if deployment.Spec.Paused {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
})
}

framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
framework.ExpectNoError(err)

framework.Logf("Waiting for deployment %q status", deploymentName)
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for deployment %q status", deploymentName)
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)

framework.Logf("Checking deployment %q for a complete condition", deploymentName)
gomega.Expect(e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Checking deployment %q for a complete condition", deploymentName)
err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)
framework.ExpectNoError(err)
}

func testDeploymentsControllerRef(f *framework.Framework) {
@@ -776,51 +784,51 @@ func testDeploymentsControllerRef(f *framework.Framework) {
c := f.ClientSet

deploymentName := "test-orphan-deployment"
framework.Logf("Creating Deployment %q", deploymentName)
e2elog.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
rsList := listDeploymentReplicaSets(c, ns, podLabels)
gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))

framework.Logf("Obtaining the ReplicaSet's UID")
e2elog.Logf("Obtaining the ReplicaSet's UID")
orphanedRSUID := rsList.Items[0].UID

framework.Logf("Checking the ReplicaSet has the right controllerRef")
e2elog.Logf("Checking the ReplicaSet has the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
e2elog.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
err = orphanDeploymentReplicaSets(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Wait for the ReplicaSet to be orphaned")
err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")
framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")

deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
e2elog.Logf("Waiting for the ReplicaSet to have the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
rsList = listDeploymentReplicaSets(c, ns, podLabels)
gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))

framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
e2elog.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
gomega.Expect(rsList.Items[0].UID).Should(gomega.Equal(orphanedRSUID))
}

@@ -841,107 +849,116 @@ func testProportionalScalingDeployment(f *framework.Framework) {
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)

framework.Logf("Creating deployment %q", deploymentName)
e2elog.Logf("Creating deployment %q", deploymentName)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Waiting for observed generation %d", deployment.Generation)
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
framework.ExpectNoError(err)

// Verify that the required pods have come up.
framework.Logf("Waiting for all required pods to come up")
e2elog.Logf("Waiting for all required pods to come up")
err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

framework.Logf("Waiting for deployment %q to complete", deployment.Name)
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for deployment %q to complete", deployment.Name)
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)

firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
e2elog.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Waiting for observed generation %d", deployment.Generation)
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
framework.ExpectNoError(err)

// Checking state of first rollout's replicaset.
maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable)
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
gomega.Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err)

// First rollout's replicaset should have .spec.replicas = 8 too.
framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err)

// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
e2elog.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Checking state of second rollout's replicaset.
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Second rollout's replicaset should have 0 available replicas.
framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
gomega.Expect(secondRS.Status.AvailableReplicas).Should(gomega.Equal(int32(0)))

// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)
framework.ExpectNoError(err)

// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
e2elog.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Check the deployment's minimum availability.
framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
e2elog.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
if deployment.Status.AvailableReplicas < minAvailableReplicas {
gomega.Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
err = fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)
framework.ExpectNoError(err)
}

// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
e2elog.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
e2elog.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)
framework.ExpectNoError(err)

// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(gomega.HaveOccurred())
e2elog.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)
framework.ExpectNoError(err)
}

func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
@@ -972,7 +989,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0))
return rsList
}

@@ -30,7 +30,9 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -163,22 +165,7 @@ var _ = SIGDescribe("DisruptionController", func() {
}

// Locate a running pod.
var pod v1.Pod
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return false, err
}

for i := range podList.Items {
if podList.Items[i].Status.Phase == v1.PodRunning {
pod = podList.Items[i]
return true, nil
}
}

return false, nil
})
pod, err := locateRunningPod(cs, ns)
framework.ExpectNoError(err)

e := &policy.Eviction{
@@ -189,10 +176,6 @@ var _ = SIGDescribe("DisruptionController", func() {
}

if c.shouldDeny {
// Since disruptionAllowed starts out false, wait at least 60s hoping that
// this gives the controller enough time to have truly set the status.
time.Sleep(timeout)

err = cs.CoreV1().Pods(ns).Evict(e)
gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
} else {
@@ -214,6 +197,34 @@ var _ = SIGDescribe("DisruptionController", func() {
}
})
}

ginkgo.It("should block an eviction until the PDB is updated to allow it", func() {
ginkgo.By("Creating a pdb that targets all three pods in a test replica set")
createPDBMinAvailableOrDie(cs, ns, intstr.FromInt(3))
createReplicaSetOrDie(cs, ns, 3, false)

ginkgo.By("First trying to evict a pod which shouldn't be evictable")
pod, err := locateRunningPod(cs, ns)
framework.ExpectNoError(err)

waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb
e := &policy.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: ns,
},
}
err = cs.CoreV1().Pods(ns).Evict(e)
gomega.Expect(err).Should(gomega.MatchError("Cannot evict pod as it would violate the pod's disruption budget."))

ginkgo.By("Updating the pdb to allow a pod to be evicted")
updatePDBMinAvailableOrDie(cs, ns, intstr.FromInt(2))

ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
waitForPodsOrDie(cs, ns, 3)
err = cs.CoreV1().Pods(ns).Evict(e)
framework.ExpectNoError(err) // the eviction is now allowed
})
})

func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) {
@@ -228,7 +239,8 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
framework.ExpectNoError(err)
framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns)
}

func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
@@ -243,7 +255,25 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
framework.ExpectNoError(err)
framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns)
}

func updatePDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
old, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
if err != nil {
return err
}
old.Spec.MinAvailable = &minAvailable
if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(old); err != nil {
return err
}
return nil
})

framework.ExpectNoError(err, "Waiting for the pdb update to be processed in namespace %s", ns)
waitForPdbToBeProcessed(cs, ns)
}

func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
@@ -281,7 +311,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
return false, fmt.Errorf("pods is nil")
}
if len(pods.Items) < n {
framework.Logf("pods: %v < %v", len(pods.Items), n)
e2elog.Logf("pods: %v < %v", len(pods.Items), n)
return false, nil
}
ready := 0
@@ -291,7 +321,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
}
}
if ready < n {
framework.Logf("running pods: %v < %v", ready, n)
e2elog.Logf("running pods: %v < %v", ready, n)
return false, nil
}
return true, nil
@@ -334,3 +364,38 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
_, err := cs.AppsV1().ReplicaSets(ns).Create(rs)
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
}

func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) {
ginkgo.By("locating a running pod")
err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return false, err
}

for i := range podList.Items {
if podList.Items[i].Status.Phase == v1.PodRunning {
pod = &podList.Items[i]
return true, nil
}
}

return false, nil
})
return pod, err
}

func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string) {
ginkgo.By("Waiting for the pdb to be processed")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
if err != nil {
return false, err
}
if pdb.Status.ObservedGeneration < pdb.Generation {
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "Waiting for the pdb to be processed in namespace %s", ns)
}

@@ -47,6 +47,14 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Ensuring job reaches completions")
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

ginkgo.By("Ensuring pods for job exist")
pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
gomega.Expect(len(pods.Items)).To(gomega.Equal(int(completions)), "failed to ensure sufficient pod for job: got %d, want %d", len(pods.Items), completions)
for _, pod := range pods.Items {
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "failed to ensure pod status: pod %s status %s", pod.Name, pod.Status.Phase)
}
})

// Pods sometimes fail, but eventually succeed.

@@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"

"github.com/onsi/ginkgo"
@@ -60,7 +61,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
expected = true
} else {
framework.Logf("Observed node ready status is NOT %v as expected", isReady)
e2elog.Logf("Observed node ready status is NOT %v as expected", isReady)
}
case <-timer:
timeout = true
@@ -96,9 +97,9 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod {
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
if err == nil {
framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
e2elog.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
} else {
framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
e2elog.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
}
return err
}
@@ -262,7 +263,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// pods on another node and that now the number of replicas is equal 'replicas'.
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
framework.ExpectNoError(err)

@@ -271,7 +272,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.ExpectNoError(err)
})

framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
@@ -293,7 +294,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
framework.ExpectNoError(err)
if pod.Spec.NodeName != node.Name {
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
e2elog.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
}
}
})
@@ -329,7 +330,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// pods on another node and that now the number of replicas is equal 'replicas + 1'.
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")

@@ -338,7 +339,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.ExpectNoError(err)
})

framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
@@ -367,7 +368,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all stateful set in ns %v", ns)
e2elog.Logf("Deleting all stateful set in ns %v", ns)
framework.DeleteAllStatefulSets(c, ns)
})

@@ -406,12 +407,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// that belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver.
// The grace period on the stateful pods is set to a value > 0.
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
e2elog.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
})

framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
@@ -450,7 +451,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// running pods after the node-controller detects node unreachable.
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")

@@ -459,7 +460,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.ExpectNoError(err)
})

framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
@@ -536,7 +537,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
}
}
}
framework.Logf(
e2elog.Logf(
"Only %v should be running after partition. Maximum TolerationSeconds among other Pods is %v",
neverEvictedPods,
maxTolerationTime,

@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
@@ -133,7 +134,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri

// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
e2elog.Logf("Ensuring all pods for ReplicationController %q are running", name)
running := int32(0)
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
@@ -149,7 +150,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
}
}
framework.ExpectNoError(err)
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++
}

@@ -159,7 +160,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
}

// Verify that something is listening.
framework.Logf("Trying to dial the pod")
e2elog.Logf("Trying to dial the pod")
retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
@@ -178,7 +179,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
namespace := f.Namespace.Name
name := "condition-test"

framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
e2elog.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
framework.ExpectNoError(err)

@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"

"github.com/onsi/ginkgo"
@@ -122,7 +123,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
// Create a ReplicaSet for a service that serves its hostname.
// The source for the Docker containter kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname
framework.Logf("Creating ReplicaSet %s", name)
e2elog.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
@@ -135,7 +136,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s

// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
framework.Logf("Ensuring a pod for ReplicaSet %q is running", name)
e2elog.Logf("Ensuring a pod for ReplicaSet %q is running", name)
running := int32(0)
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
@@ -151,7 +152,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
}
}
framework.ExpectNoError(err)
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++
}

@@ -161,7 +162,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
}

// Verify that something is listening.
framework.Logf("Trying to dial the pod")
e2elog.Logf("Trying to dial the pod")
retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

@@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -86,7 +87,7 @@ var _ = SIGDescribe("StatefulSet", func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
e2elog.Logf("Deleting all statefulset in ns %v", ns)
framework.DeleteAllStatefulSets(c, ns)
})

@@ -753,13 +754,13 @@ var _ = SIGDescribe("StatefulSet", func() {
pod := event.Object.(*v1.Pod)
switch event.Type {
case watch.Deleted:
framework.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace)
e2elog.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace)
if initialStatefulPodUID == "" {
return false, nil
}
return true, nil
}
framework.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.",
e2elog.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.",
pod.Namespace, pod.Name, pod.UID, pod.Status.Phase)
initialStatefulPodUID = pod.UID
return false, nil
@@ -836,7 +837,7 @@ var _ = SIGDescribe("StatefulSet", func() {
if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns)
}
framework.Logf("Deleting all statefulset in ns %v", ns)
e2elog.Logf("Deleting all statefulset in ns %v", ns)
framework.DeleteAllStatefulSets(c, ns)
})

@@ -876,7 +877,7 @@ func kubectlExecWithRetries(args ...string) (out string) {
if out, err = framework.RunKubectl(args...); err == nil {
return
}
framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
e2elog.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
}
framework.Failf("Failed to execute \"%v\" with retries: %v", args, err)
return
@@ -938,7 +939,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace)
for k, v := range kv {
cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
framework.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
e2elog.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
}
}

@@ -969,12 +970,12 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet {
m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns)

framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
e2elog.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
for _, cmd := range []string{
"create database statefulset;",
"use statefulset; create table foo (k varchar(20), v varchar(20));",
} {
framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
e2elog.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
}
return m.ss
}
@@ -983,7 +984,7 @@ func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
e2elog.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
}
}

@@ -1014,7 +1015,7 @@ func (m *redisTester) deploy(ns string) *apps.StatefulSet {
func (m *redisTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv {
framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
e2elog.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
}
}

@@ -1039,12 +1040,12 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {

func (c *cockroachDBTester) deploy(ns string) *apps.StatefulSet {
c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns)
framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
e2elog.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
for _, cmd := range []string{
"CREATE DATABASE IF NOT EXISTS foo;",
"CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);",
} {
framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
e2elog.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
}
return c.ss
}
@@ -1053,7 +1054,7 @@ func (c *cockroachDBTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v)
framework.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name))
e2elog.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name))
}
}
func (c *cockroachDBTester) read(statefulPodIndex int, key string) string {

@@ -42,7 +42,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"

jsonpatch "github.com/evanphx/json-patch"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)

var (
@@ -62,12 +62,12 @@ var (
var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
f := framework.NewDefaultFramework("audit")
var namespace string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
namespace = f.Namespace.Name
})

It("should audit API calls to create, get, update, patch, delete, list, watch pods.", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch pods.", func() {
pod := &apiv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-pod",
@@ -201,7 +201,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
})

It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() {
podLabels := map[string]string{"name": "audit-deployment-pod"}
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType)

@@ -328,7 +328,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
})

It("should audit API calls to create, get, update, patch, delete, list, watch configmaps.", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch configmaps.", func() {
configMap := &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-configmap",
@@ -461,7 +461,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
})

It("should audit API calls to create, get, update, patch, delete, list, watch secrets.", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch secrets.", func() {
secret := &apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-secret",
@@ -593,7 +593,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
})

It("should audit API calls to create and delete custom resource definition.", func() {
ginkgo.It("should audit API calls to create and delete custom resource definition.", func() {
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "failed to load config")
apiExtensionClient, err := apiextensionclientset.NewForConfig(config)
@@ -654,12 +654,12 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})

// test authorizer annotations, RBAC is required.
It("should audit API calls to get a pod with unauthorized user.", func() {
ginkgo.It("should audit API calls to get a pod with unauthorized user.", func() {
if !auth.IsRBACEnabled(f.ClientSet.RbacV1beta1()) {
framework.Skipf("RBAC not enabled.")
}

By("Creating a kubernetes client that impersonates an unauthorized anonymous user")
ginkgo.By("Creating a kubernetes client that impersonates an unauthorized anonymous user")
config, err := framework.LoadConfig()
framework.ExpectNoError(err)
config.Impersonate = restclient.ImpersonationConfig{
@@ -691,8 +691,8 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
})
})

It("should list pods as impersonated user.", func() {
By("Creating a kubernetes client that impersonates an authorized user")
ginkgo.It("should list pods as impersonated user.", func() {
ginkgo.By("Creating a kubernetes client that impersonates an authorized user")
config, err := framework.LoadConfig()
framework.ExpectNoError(err)
config.Impersonate = restclient.ImpersonationConfig{

@@ -21,7 +21,7 @@ import (
"strings"
"time"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"

auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
apiv1 "k8s.io/api/core/v1"
@@ -44,10 +44,10 @@ import (
var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
f := framework.NewDefaultFramework("audit")

It("should dynamically audit API calls", func() {
ginkgo.It("should dynamically audit API calls", func() {
namespace := f.Namespace.Name

By("Creating a kubernetes client that impersonates an unauthorized anonymous user")
ginkgo.By("Creating a kubernetes client that impersonates an unauthorized anonymous user")
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "failed to fetch config")

@@ -31,13 +31,13 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/utils"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("Certificates API", func() {
f := framework.NewDefaultFramework("certificates")

It("should support building a client with a CSR", func() {
ginkgo.It("should support building a client with a CSR", func() {
const commonName = "tester-csr"

pk, err := utils.NewPrivateKey()

@@ -18,6 +18,7 @@ package auth

import "github.com/onsi/ginkgo"

// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-auth] "+text, body)
}

@@ -23,16 +23,16 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
imageutil "k8s.io/kubernetes/test/utils/image"
)

var _ = SIGDescribe("Metadata Concealment", func() {
f := framework.NewDefaultFramework("metadata-concealment")

It("should run a check-metadata-concealment job to completion", func() {
ginkgo.It("should run a check-metadata-concealment job to completion", func() {
framework.SkipUnlessProviderIs("gce")
By("Creating a job")
ginkgo.By("Creating a job")
job := &batch.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "check-metadata-concealment",
@@ -57,7 +57,7 @@ var _ = SIGDescribe("Metadata Concealment", func() {
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
framework.ExpectNoError(err, "failed to create job (%s:%s)", f.Namespace.Name, job.Name)

By("Ensuring job reaches completions")
ginkgo.By("Ensuring job reaches completions")
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
framework.ExpectNoError(err, "failed to ensure job completion (%s:%s)", f.Namespace.Name, job.Name)
})

@@ -25,8 +25,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
@@ -34,12 +34,12 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
f := framework.NewDefaultFramework("node-authn")
var ns string
var nodeIPs []string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
ns = f.Namespace.Name

nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns)
Expect(len(nodeList.Items)).NotTo(BeZero())
gomega.Expect(len(nodeList.Items)).NotTo(gomega.BeZero())

pickedNode := nodeList.Items[0]
nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
@@ -50,20 +50,20 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
saName := "default"
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName)
Expect(len(sa.Secrets)).NotTo(BeZero())
gomega.Expect(len(sa.Secrets)).NotTo(gomega.BeZero())
})

It("The kubelet's main port 10250 should reject requests with no credentials", func() {
ginkgo.It("The kubelet's main port 10250 should reject requests with no credentials", func() {
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
// Anonymous authentication is disabled by default
result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
gomega.Expect(result).To(gomega.Or(gomega.Equal("401"), gomega.Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
}
})

It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
By("create a new ServiceAccount for authentication")
ginkgo.It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
ginkgo.By("create a new ServiceAccount for authentication")
trueValue := true
newSA := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
@@ -84,7 +84,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
"%{http_code}",
"cat /var/run/secrets/kubernetes.io/serviceaccount/token",
nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
gomega.Expect(result).To(gomega.Or(gomega.Equal("401"), gomega.Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
}
})
})

@@ -30,13 +30,13 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const (
NodesGroup = "system:nodes"
NodeNamePrefix = "system:node:"
nodesGroup = "system:nodes"
nodeNamePrefix = "system:node:"
)

var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
@@ -48,47 +48,47 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
var asUser string
var defaultSaSecret string
var nodeName string
BeforeEach(func() {
ginkgo.BeforeEach(func() {
ns = f.Namespace.Name

nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns)
Expect(len(nodeList.Items)).NotTo(Equal(0))
gomega.Expect(len(nodeList.Items)).NotTo(gomega.Equal(0))
nodeName = nodeList.Items[0].Name
asUser = NodeNamePrefix + nodeName
asUser = nodeNamePrefix + nodeName
saName := "default"
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
Expect(len(sa.Secrets)).NotTo(Equal(0))
gomega.Expect(len(sa.Secrets)).NotTo(gomega.Equal(0))
framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName)
defaultSaSecret = sa.Secrets[0].Name
By("Creating a kubernetes client that impersonates a node")
ginkgo.By("Creating a kubernetes client that impersonates a node")
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "failed to load kubernetes client config")
config.Impersonate = restclient.ImpersonationConfig{
UserName: asUser,
Groups: []string{NodesGroup},
Groups: []string{nodesGroup},
}
c, err = clientset.NewForConfig(config)
framework.ExpectNoError(err, "failed to create Clientset for the given config: %+v", *config)

})
It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
_, err := c.CoreV1().Secrets(ns).Get("foo", metav1.GetOptions{})
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
})

It("Getting an existing secret should exit with the Forbidden error", func() {
ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() {
_, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{})
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
})

It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() {
ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() {
_, err := c.CoreV1().ConfigMaps(ns).Get("foo", metav1.GetOptions{})
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
})

It("Getting an existing configmap should exit with the Forbidden error", func() {
By("Create a configmap for testing")
ginkgo.It("Getting an existing configmap should exit with the Forbidden error", func() {
ginkgo.By("Create a configmap for testing")
configmap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
@@ -101,11 +101,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
_, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap)
framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap)
_, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{})
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
})

It("Getting a secret for a workload the node has access to should succeed", func() {
By("Create a secret for testing")
ginkgo.It("Getting a secret for a workload the node has access to should succeed", func() {
ginkgo.By("Create a secret for testing")
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
@@ -118,11 +118,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(secret)
framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name)

By("Node should not get the secret")
ginkgo.By("Node should not get the secret")
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))

By("Create a pod that use the secret")
ginkgo.By("Create a pod that use the secret")
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pause",
@@ -151,7 +151,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
_, err = f.ClientSet.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "failed to create pod (%s:%s)", ns, pod.Name)

By("The node should able to access the secret")
ginkgo.By("The node should able to access the secret")
itv := framework.Poll
dur := 1 * time.Minute
err = wait.Poll(itv, dur, func() (bool, error) {
@@ -165,7 +165,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
framework.ExpectNoError(err, "failed to get secret after trying every %v for %v (%s:%s)", itv, dur, ns, secret.Name)
})

It("A node shouldn't be able to create another node", func() {
ginkgo.It("A node shouldn't be able to create another node", func() {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
TypeMeta: metav1.TypeMeta{
@@ -173,14 +173,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
APIVersion: "v1",
},
}
By(fmt.Sprintf("Create node foo by user: %v", asUser))
ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
_, err := c.CoreV1().Nodes().Create(node)
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
})

It("A node shouldn't be able to delete another node", func() {
By(fmt.Sprintf("Create node foo by user: %v", asUser))
ginkgo.It("A node shouldn't be able to delete another node", func() {
ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{})
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
gomega.Expect(apierrors.IsForbidden(err)).Should(gomega.Equal(true))
})
})

@@ -37,8 +37,8 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
utilpointer "k8s.io/utils/pointer"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const nobodyUser = int64(65534)
@@ -51,7 +51,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
// with reduced privileges.
var c clientset.Interface
var ns string // Test namespace, for convenience
BeforeEach(func() {
ginkgo.BeforeEach(func() {
if !framework.IsPodSecurityPolicyEnabled(f) {
framework.Skipf("PodSecurityPolicy not enabled")
}
@@ -60,7 +60,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
}
ns = f.Namespace.Name

By("Creating a kubernetes client that impersonates the default service account")
ginkgo.By("Creating a kubernetes client that impersonates the default service account")
config, err := framework.LoadConfig()
framework.ExpectNoError(err)
config.Impersonate = restclient.ImpersonationConfig{
@@ -70,24 +70,24 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
c, err = clientset.NewForConfig(config)
framework.ExpectNoError(err)

By("Binding the edit role to the default SA")
ginkgo.By("Binding the edit role to the default SA")
err = auth.BindClusterRole(f.ClientSet.RbacV1beta1(), "edit", ns,
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"})
framework.ExpectNoError(err)
})

It("should forbid pod creation when no PSP is available", func() {
By("Running a restricted pod")
ginkgo.It("should forbid pod creation when no PSP is available", func() {
ginkgo.By("Running a restricted pod")
_, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted"))
expectForbidden(err)
})

It("should enforce the restricted policy.PodSecurityPolicy", func() {
By("Creating & Binding a restricted policy for the test service account")
ginkgo.It("should enforce the restricted policy.PodSecurityPolicy", func() {
ginkgo.By("Creating & Binding a restricted policy for the test service account")
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()

By("Running a restricted pod")
ginkgo.By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
@@ -98,8 +98,8 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
})
})

It("should allow pods under the privileged policy.PodSecurityPolicy", func() {
By("Creating & Binding a privileged policy for the test service account")
ginkgo.It("should allow pods under the privileged policy.PodSecurityPolicy", func() {
ginkgo.By("Creating & Binding a privileged policy for the test service account")
// Ensure that the permissive policy is used even in the presence of the restricted policy.
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()
@@ -115,26 +115,26 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
Expect(found).To(BeTrue(), "PSP annotation not found")
Expect(validated).To(Equal(expectedPSP.Name), "Unexpected validated PSP")
gomega.Expect(found).To(gomega.BeTrue(), "PSP annotation not found")
gomega.Expect(validated).To(gomega.Equal(expectedPSP.Name), "Unexpected validated PSP")
})
})
})

func expectForbidden(err error) {
Expect(err).To(HaveOccurred(), "should be forbidden")
Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
gomega.Expect(err).To(gomega.HaveOccurred(), "should be forbidden")
gomega.Expect(apierrs.IsForbidden(err)).To(gomega.BeTrue(), "should be forbidden error")
}

func testPrivilegedPods(tester func(pod *v1.Pod)) {
By("Running a privileged pod", func() {
ginkgo.By("Running a privileged pod", func() {
privileged := restrictedPod("privileged")
privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
tester(privileged)
})

By("Running a HostPath pod", func() {
ginkgo.By("Running a HostPath pod", func() {
hostpath := restrictedPod("hostpath")
hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
Name:      "hp",
@@ -149,26 +149,26 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) {
tester(hostpath)
})

By("Running a HostNetwork pod", func() {
ginkgo.By("Running a HostNetwork pod", func() {
hostnet := restrictedPod("hostnet")
hostnet.Spec.HostNetwork = true
tester(hostnet)
})

By("Running a HostPID pod", func() {
ginkgo.By("Running a HostPID pod", func() {
hostpid := restrictedPod("hostpid")
hostpid.Spec.HostPID = true
tester(hostpid)
})

By("Running a HostIPC pod", func() {
ginkgo.By("Running a HostIPC pod", func() {
hostipc := restrictedPod("hostipc")
hostipc.Spec.HostIPC = true
tester(hostipc)
})

if common.IsAppArmorSupported() {
By("Running a custom AppArmor profile pod", func() {
ginkgo.By("Running a custom AppArmor profile pod", func() {
aa := restrictedPod("apparmor")
// Every node is expected to have the docker-default profile.
aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
@@ -176,13 +176,13 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) {
})
}

By("Running an unconfined Seccomp pod", func() {
ginkgo.By("Running an unconfined Seccomp pod", func() {
unconfined := restrictedPod("seccomp")
unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
tester(unconfined)
})

By("Running a SYS_ADMIN pod", func() {
ginkgo.By("Running a SYS_ADMIN pod", func() {
sysadmin := restrictedPod("sysadmin")
sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
Add: []v1.Capability{"SYS_ADMIN"},
@@ -191,14 +191,14 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) {
tester(sysadmin)
})

By("Running a RunAsGroup pod", func() {
ginkgo.By("Running a RunAsGroup pod", func() {
sysadmin := restrictedPod("runasgroup")
gid := int64(0)
sysadmin.Spec.Containers[0].SecurityContext.RunAsGroup = &gid
tester(sysadmin)
})

By("Running a RunAsUser pod", func() {
ginkgo.By("Running a RunAsUser pod", func() {
sysadmin := restrictedPod("runasuser")
uid := int64(0)
sysadmin.Spec.Containers[0].SecurityContext.RunAsUser = &uid

@@ -19,6 +19,8 @@ package auth
import (
"fmt"
"path"
"regexp"
"strings"
"time"

authenticationv1 "k8s.io/api/authentication/v1"
@@ -33,20 +35,21 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
var inClusterClientImage = imageutils.GetE2EImage(imageutils.InClusterClient)

var _ = SIGDescribe("ServiceAccounts", func() {
f := framework.NewDefaultFramework("svcaccounts")

It("should ensure a single API token exists", func() {
ginkgo.It("should ensure a single API token exists", func() {
// wait for the service account to reference a single secret
var secrets []v1.ObjectReference
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
By("waiting for a single token reference")
ginkgo.By("waiting for a single token reference")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
e2elog.Logf("default service account was not found")
@@ -71,20 +74,20 @@ var _ = SIGDescribe("ServiceAccounts", func() {

// make sure the reference doesn't flutter
{
By("ensuring the single token reference persists")
ginkgo.By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
gomega.Expect(sa.Secrets).To(gomega.Equal(secrets))
}

// delete the referenced secret
By("deleting the service account token")
ginkgo.By("deleting the service account token")
framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil))

// wait for the referenced secret to be removed, and another one autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token reference")
ginkgo.By("waiting for a new token reference")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if err != nil {
e2elog.Logf("error getting default service account: %v", err)
@@ -109,15 +112,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {

// make sure the reference doesn't flutter
{
By("ensuring the single token reference persists")
ginkgo.By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
gomega.Expect(sa.Secrets).To(gomega.Equal(secrets))
}

// delete the reference from the service account
By("deleting the reference to the service account token")
ginkgo.By("deleting the reference to the service account token")
{
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
@@ -128,7 +131,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {

// wait for another one to be autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("waiting for a new token to be created and added")
ginkgo.By("waiting for a new token to be created and added")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
if err != nil {
e2elog.Logf("error getting default service account: %v", err)
@@ -149,11 +152,11 @@ var _ = SIGDescribe("ServiceAccounts", func() {

// make sure the reference doesn't flutter
{
By("ensuring the single token reference persists")
ginkgo.By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
framework.ExpectNoError(err)
Expect(sa.Secrets).To(Equal(secrets))
gomega.Expect(sa.Secrets).To(gomega.Equal(secrets))
}
})

@@ -174,7 +177,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {

// Standard get, update retry loop
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("getting the auto-created API token")
ginkgo.By("getting the auto-created API token")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("mount-test", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
e2elog.Logf("mount-test service account was not found")
@@ -231,19 +234,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.ExpectNoError(err)

// CA and namespace should be identical
Expect(mountedCA).To(Equal(rootCAContent))
Expect(mountedNamespace).To(Equal(f.Namespace.Name))
gomega.Expect(mountedCA).To(gomega.Equal(rootCAContent))
gomega.Expect(mountedNamespace).To(gomega.Equal(f.Namespace.Name))
// Token should be a valid credential that identifies the pod's service account
tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}}
tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(tokenReview)
framework.ExpectNoError(err)
Expect(tokenReview.Status.Authenticated).To(Equal(true))
Expect(tokenReview.Status.Error).To(Equal(""))
Expect(tokenReview.Status.User.Username).To(Equal("system:serviceaccount:" + f.Namespace.Name + ":" + sa.Name))
gomega.Expect(tokenReview.Status.Authenticated).To(gomega.Equal(true))
gomega.Expect(tokenReview.Status.Error).To(gomega.Equal(""))
gomega.Expect(tokenReview.Status.User.Username).To(gomega.Equal("system:serviceaccount:" + f.Namespace.Name + ":" + sa.Name))
groups := sets.NewString(tokenReview.Status.User.Groups...)
Expect(groups.Has("system:authenticated")).To(Equal(true), fmt.Sprintf("expected system:authenticated group, had %v", groups.List()))
Expect(groups.Has("system:serviceaccounts")).To(Equal(true), fmt.Sprintf("expected system:serviceaccounts group, had %v", groups.List()))
Expect(groups.Has("system:serviceaccounts:"+f.Namespace.Name)).To(Equal(true), fmt.Sprintf("expected system:serviceaccounts:"+f.Namespace.Name+" group, had %v", groups.List()))
gomega.Expect(groups.Has("system:authenticated")).To(gomega.Equal(true), fmt.Sprintf("expected system:authenticated group, had %v", groups.List()))
gomega.Expect(groups.Has("system:serviceaccounts")).To(gomega.Equal(true), fmt.Sprintf("expected system:serviceaccounts group, had %v", groups.List()))
gomega.Expect(groups.Has("system:serviceaccounts:"+f.Namespace.Name)).To(gomega.Equal(true), fmt.Sprintf("expected system:serviceaccounts:"+f.Namespace.Name+" group, had %v", groups.List()))
})

/*
@@ -285,7 +288,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {

// Standard get, update retry loop
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
By("getting the auto-created API token")
ginkgo.By("getting the auto-created API token")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
e2elog.Logf("mount service account was not found")
@@ -410,4 +413,138 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
}
})

ginkgo.It("should support InClusterConfig with token rotation [Slow] [Feature:TokenRequestProjection]", func() {
cfg, err := framework.LoadConfig()
framework.ExpectNoError(err)

if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-root-ca.crt",
},
Data: map[string]string{
"ca.crt": string(cfg.TLSClientConfig.CAData),
},
}); err != nil && !apierrors.IsAlreadyExists(err) {
framework.Failf("Unexpected err creating kube-ca-crt: %v", err)
}

tenMin := int64(10 * 60)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name:  "inclusterclient",
Image: inClusterClientImage,
VolumeMounts: []v1.VolumeMount{{
MountPath: "/var/run/secrets/kubernetes.io/serviceaccount",
Name:      "kube-api-access-e2e",
ReadOnly:  true,
}},
}},
RestartPolicy:      v1.RestartPolicyNever,
ServiceAccountName: "default",
Volumes: []v1.Volume{{
Name: "kube-api-access-e2e",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ServiceAccountToken: &v1.ServiceAccountTokenProjection{
Path:              "token",
ExpirationSeconds: &tenMin,
},
},
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: "kube-root-ca.crt",
},
Items: []v1.KeyToPath{
{
Key:  "ca.crt",
Path: "ca.crt",
},
},
},
},
{
DownwardAPI: &v1.DownwardAPIProjection{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "namespace",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath:  "metadata.namespace",
},
},
},
},
},
},
},
},
}},
},
}
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)

framework.Logf("created pod")
if !framework.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
}

framework.Logf("pod is ready")

var logs string
if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) {
framework.Logf("polling logs")
logs, err = framework.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient")
if err != nil {
framework.Logf("Error pulling logs: %v", err)
return false, nil
}
tokenCount, err := parseInClusterClientLogs(logs)
if err != nil {
return false, fmt.Errorf("inclusterclient reported an error: %v", err)
}
if tokenCount < 2 {
framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)
return false, nil
}
return true, nil
}); err != nil {
framework.Failf("Unexpected error: %v\n%s", err, logs)
}
})
})

var reportLogsParser = regexp.MustCompile("([a-zA-Z0-9-_]*)=([a-zA-Z0-9-_]*)$")

func parseInClusterClientLogs(logs string) (int, error) {
seenTokens := map[string]struct{}{}

lines := strings.Split(logs, "\n")
for _, line := range lines {
parts := reportLogsParser.FindStringSubmatch(line)
if len(parts) != 3 {
continue
}

key, value := parts[1], parts[2]
switch key {
case "authz_header":
if value == "<empty>" {
return 0, fmt.Errorf("saw empty Authorization header")
}
seenTokens[value] = struct{}{}
case "status":
if value == "failed" {
return 0, fmt.Errorf("saw status=failed")
}
}
}

return len(seenTokens), nil
}

|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
],
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
@@ -46,7 +47,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
|
||||
nodeToDelete := nodeDeleteCandidates.Items[0]
|
||||
|
||||
origNodes := framework.GetReadyNodesIncludingTaintedOrDie(c)
|
||||
framework.Logf("Original number of ready nodes: %d", len(origNodes.Items))
|
||||
e2elog.Logf("Original number of ready nodes: %d", len(origNodes.Items))
|
||||
|
||||
err := framework.DeleteNodeOnCloudProvider(&nodeToDelete)
|
||||
if err != nil {
|
||||
|
||||
@@ -69,7 +69,6 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
|
||||
@@ -38,8 +38,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
probTestContainerName = "test-webserver"
|
||||
probTestInitialDelaySeconds = 15
|
||||
probeTestInitialDelaySeconds = 15
|
||||
|
||||
defaultObservationTimeout = time.Minute * 4
|
||||
)
|
||||
@@ -59,7 +58,8 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Create a Pod that is configured with a initial delay set on the readiness probe. Check the Pod Start time to compare to the initial delay. The Pod MUST be ready only after the specified initial delay.
|
||||
*/
|
||||
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() {
|
||||
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
|
||||
containerName := "test-webserver"
|
||||
p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
|
||||
f.WaitForPodReady(p.Name)
|
||||
|
||||
p, err := podClient.Get(p.Name, metav1.GetOptions{})
|
||||
@@ -72,11 +72,11 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
// is true for a single container pod.
|
||||
readyTime, err := getTransitionTimeForReadyCondition(p)
|
||||
framework.ExpectNoError(err)
|
||||
startedTime, err := getContainerStartedTime(p, probTestContainerName)
|
||||
startedTime, err := getContainerStartedTime(p, containerName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
|
||||
initialDelay := probTestInitialDelaySeconds * time.Second
|
||||
initialDelay := probeTestInitialDelaySeconds * time.Second
|
||||
if readyTime.Sub(startedTime) < initialDelay {
|
||||
framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
|
||||
}
|
||||
@@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
then the Pod MUST never be ready, never be running and restart count MUST be zero.
|
||||
*/
|
||||
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
|
||||
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
|
||||
p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
|
||||
Consistently(func() (bool, error) {
|
||||
p, err := podClient.Get(p.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -117,30 +117,14 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Create a Pod with liveness probe that uses ExecAction handler to cat /temp/health file. The Container deletes the file /temp/health after 10 second, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"cat", "/tmp/health"},
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 1, defaultObservationTimeout)
|
||||
cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"}
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: execHandler([]string{"cat", "/tmp/health"}),
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := busyBoxPodSpec(nil, livenessProbe, cmd)
|
||||
runLivenessTest(f, pod, 1, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -149,30 +133,14 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Pod is created with liveness probe that uses ‘exec’ command to cat /temp/health file. Liveness probe MUST not fail to check health and the restart count should remain 0.
|
||||
*/
|
||||
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"cat", "/tmp/health"},
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 0, defaultObservationTimeout)
|
||||
cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"}
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: execHandler([]string{"cat", "/tmp/health"}),
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := busyBoxPodSpec(nil, livenessProbe, cmd)
|
||||
runLivenessTest(f, pod, 0, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -181,31 +149,28 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.Liveness),
|
||||
Command: []string{"/server"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/healthz",
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 1, defaultObservationTimeout)
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: httpGetHandler("/healthz", 8080),
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := livenessPodSpec(nil, livenessProbe)
|
||||
runLivenessTest(f, pod, 1, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.15
|
||||
Testname: Pod liveness probe, using tcp socket, no restart
|
||||
Description: A Pod is created with liveness probe on tcp socket 8080. The http handler on port 8080 will return http errors after 10 seconds, but socket will remain open. Liveness probe MUST not fail to check health and the restart count should remain 0.
|
||||
*/
|
||||
It("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func() {
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: tcpSocketHandler(8080),
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := livenessPodSpec(nil, livenessProbe)
|
||||
runLivenessTest(f, pod, 0, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -214,31 +179,13 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz enpoind on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment everytime health check fails, measure upto 5 restart.
|
||||
*/
|
||||
framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.Liveness),
|
||||
Command: []string{"/server"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/healthz",
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 5,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 5, time.Minute*5)
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: httpGetHandler("/healthz", 8080),
|
||||
InitialDelaySeconds: 5,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := livenessPodSpec(nil, livenessProbe)
|
||||
runLivenessTest(f, pod, 5, time.Minute*5)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -247,32 +194,14 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: A Pod is created with liveness probe on http endpoint ‘/’. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero.
|
||||
*/
|
||||
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/",
|
||||
Port: intstr.FromInt(80),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
TimeoutSeconds: 5,
|
||||
FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers.
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 0, defaultObservationTimeout)
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: httpGetHandler("/", 80),
|
||||
InitialDelaySeconds: 15,
|
||||
TimeoutSeconds: 5,
|
||||
FailureThreshold: 5, // to accommodate nodes which are slow in bringing up containers.
|
||||
}
|
||||
pod := testWebServerPodSpec(nil, livenessProbe, "test-webserver", 80)
|
||||
runLivenessTest(f, pod, 0, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -283,31 +212,15 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
It("should be restarted with a docker exec liveness probe with timeout ", func() {
|
||||
// TODO: enable this test once the default exec handler supports timeout.
|
||||
framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "sleep 600"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"/bin/sh", "-c", "sleep 10"},
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
TimeoutSeconds: 1,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 1, defaultObservationTimeout)
|
||||
cmd := []string{"/bin/sh", "-c", "sleep 600"}
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}),
|
||||
InitialDelaySeconds: 15,
|
||||
TimeoutSeconds: 1,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := busyBoxPodSpec(nil, livenessProbe, cmd)
|
||||
runLivenessTest(f, pod, 1, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -316,31 +229,13 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: A Pod is created with liveness probe on http endpoint /redirect?loc=healthz. The http handler on the /redirect will redirect to the /healthz endpoint, which will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1.
|
||||
*/
|
||||
It("should be restarted with a local redirect http liveness probe", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http-redirect",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.Liveness),
|
||||
Command: []string{"/server"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/redirect?loc=" + url.QueryEscape("/healthz"),
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 1, defaultObservationTimeout)
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: httpGetHandler("/redirect?loc="+url.QueryEscape("/healthz"), 8080),
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := livenessPodSpec(nil, livenessProbe)
|
||||
runLivenessTest(f, pod, 1, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -349,31 +244,12 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: A Pod is created with liveness probe on http endpoint /redirect with a redirect to http://0.0.0.0/. The http handler on the /redirect should not follow the redirect, but instead treat it as a success and generate an event.
|
||||
*/
|
||||
It("should *not* be restarted with a non-local redirect http liveness probe", func() {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http-redirect",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.Liveness),
|
||||
Command: []string{"/server"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/redirect?loc=" + url.QueryEscape("http://0.0.0.0/"),
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
livenessProbe := &v1.Probe{
|
||||
Handler: httpGetHandler("/redirect?loc="+url.QueryEscape("http://0.0.0.0/"), 8080),
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
}
|
||||
pod := livenessPodSpec(nil, livenessProbe)
|
||||
runLivenessTest(f, pod, 0, defaultObservationTimeout)
|
||||
// Expect an event of type "ProbeWarning".
|
||||
expectedEvent := fields.Set{
|
||||
@@ -417,21 +293,86 @@ func getRestartCount(p *v1.Pod) int {
return count
}

func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
pod := &v1.Pod{
func testWebServerPodSpec(readinessProbe, livenessProbe *v1.Probe, containerName string, port int) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name:            probTestContainerName,
Name:            containerName,
Image:           imageutils.GetE2EImage(imageutils.TestWebserver),
Ports:           []v1.ContainerPort{{ContainerPort: int32(port)}},
LivenessProbe:   livenessProbe,
ReadinessProbe:  readinessProbe,
},
},
},
}
return pod
}

func busyBoxPodSpec(readinessProbe, livenessProbe *v1.Probe, cmd []string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name:   "busybox-" + string(uuid.NewUUID()),
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name:           "busybox",
Image:          imageutils.GetE2EImage(imageutils.BusyBox),
Command:        cmd,
LivenessProbe:  livenessProbe,
ReadinessProbe: readinessProbe,
},
},
},
}
}

func livenessPodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name:   "liveness-" + string(uuid.NewUUID()),
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name:           "liveness",
Image:          imageutils.GetE2EImage(imageutils.Liveness),
Command:        []string{"/server"},
LivenessProbe:  livenessProbe,
ReadinessProbe: readinessProbe,
},
},
},
}
}

func execHandler(cmd []string) v1.Handler {
return v1.Handler{
Exec: &v1.ExecAction{
Command: cmd,
},
}
}

func httpGetHandler(path string, port int) v1.Handler {
return v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: path,
Port: intstr.FromInt(port),
},
}
}

func tcpSocketHandler(port int) v1.Handler {
return v1.Handler{
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromInt(port),
},
}
}

type webserverProbeBuilder struct {
@@ -451,15 +392,10 @@ func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder {

func (b webserverProbeBuilder) build() *v1.Probe {
probe := &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/",
},
},
Handler: httpGetHandler("/", 80),
}
if b.initialDelay {
probe.InitialDelaySeconds = probTestInitialDelaySeconds
probe.InitialDelaySeconds = probeTestInitialDelaySeconds
}
if b.failing {
probe.HTTPGet.Port = intstr.FromInt(81)

@@ -23,18 +23,12 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
)

var (
hostIPVersion = utilversion.MustParseSemantic("v1.8.0")
podUIDVersion = utilversion.MustParseSemantic("v1.8.0")
)

var _ = Describe("[sig-node] Downward API", func() {
f := framework.NewDefaultFramework("downward-api")

@@ -90,7 +84,6 @@ var _ = Describe("[sig-node] Downward API", func() {
Description: Downward API MUST expose Pod and Container fields as environment variables. Specify host IP as environment variable in the Pod Spec are visible at runtime in the container.
*/
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
@@ -218,7 +211,6 @@ var _ = Describe("[sig-node] Downward API", func() {
Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{

@@ -67,7 +67,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
t := time.Now()
for {
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", pod.Name)
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
e2elog.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector:   selector.String(),
@@ -256,7 +256,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

ginkgo.By("verifying pod creation was observed")
@@ -279,11 +279,11 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
framework.ExpectNoError(err, "failed to GET scheduled pod")

ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")
framework.ExpectNoError(err, "failed to delete pod")

ginkgo.By("verifying the kubelet observed the termination notice")
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
@@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
})

@@ -373,7 +373,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

ginkgo.By("updating the pod")
@@ -388,7 +388,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
e2elog.Logf("Pod update OK")
})
@@ -427,7 +427,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))

ginkgo.By("updating the pod")
@@ -491,7 +491,7 @@ var _ = framework.KubeDescribe("Pods", func() {
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service")
framework.ExpectNoError(err, "failed to create service")

// Make a client pod that verifies that it has the service environment variables.
podName := "client-envvars-" + string(uuid.NewUUID())
@@ -538,7 +538,7 @@ var _ = framework.KubeDescribe("Pods", func() {
*/
framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
framework.ExpectNoError(err, "unable to get base config")

ginkgo.By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID())
@@ -620,7 +620,7 @@ var _ = framework.KubeDescribe("Pods", func() {
*/
framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
framework.ExpectNoError(err, "unable to get base config")

ginkgo.By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID())
@@ -798,14 +798,15 @@ var _ = framework.KubeDescribe("Pods", func() {
}

validatePodReadiness := func(expectReady bool) {
gomega.Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
err := wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
podReady := podClient.PodIsReady(podName)
res := expectReady == podReady
if !res {
e2elog.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
}
return res, nil
})).NotTo(gomega.HaveOccurred())
})
framework.ExpectNoError(err)
}

ginkgo.By("submitting the pod to kubernetes")
@@ -814,19 +815,19 @@ var _ = framework.KubeDescribe("Pods", func() {

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Sleep for 10 seconds.
time.Sleep(maxReadyStatusUpdateTolerance)
gomega.Expect(podClient.PodIsReady(podName)).To(gomega.BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
validatePodReadiness(true)

ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
validatePodReadiness(false)

})

@@ -114,7 +114,7 @@ while true; do sleep 1; done
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))

status, err := terminateContainer.GetStatus()
Expect(err).ShouldNot(HaveOccurred())
framework.ExpectNoError(err)

By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
@@ -148,7 +148,7 @@ while true; do sleep 1; done

By("get the container status")
status, err := c.GetStatus()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)

By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
@@ -286,7 +286,7 @@ while true; do sleep 1; done
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
container.ImagePullSecrets = []string{secret.Name}
}

@@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
|
||||
// failed pods without running containers. This would create a race as the pod
|
||||
// might have already been deleted here.
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
@@ -85,16 +85,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
|
||||
|
||||
By("Waiting for pod completion")
|
||||
err = f.WaitForPodNoLongerRunning(pod.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Checking that the pod succeeded")
|
||||
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
|
||||
|
||||
By("Getting logs from the pod")
|
||||
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Checking that the sysctl is actually updated")
|
||||
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
|
||||
@@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
|
||||
// failed pods without running containers. This would create a race as the pod
|
||||
// might have already been deleted here.
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
@@ -128,16 +128,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
|
||||
|
||||
By("Waiting for pod completion")
|
||||
err = f.WaitForPodNoLongerRunning(pod.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Checking that the pod succeeded")
|
||||
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
|
||||
|
||||
By("Getting logs from the pod")
|
||||
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Checking that the sysctl is actually updated")
|
||||
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
|
||||
@@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
|
||||
// failed pods without running containers. This would create a race as the pod
|
||||
// might have already been deleted here.
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
|
||||
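Note: the hunks above replace bare gomega assertions with the framework helper. A minimal sketch of the resulting convention, using only the framework.ExpectNoError signature that appears later in this diff (the wrapper function below is illustrative, not part of the change):

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkNoError illustrates the pattern adopted above: instead of
// Expect(err).NotTo(HaveOccurred()), call framework.ExpectNoError, which
// logs the error and an optional explanation before failing the spec.
func checkNoError(err error) {
	framework.ExpectNoError(err, "unexpected error from test step")
}
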
@@ -35,7 +35,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework/testfiles"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

const (
@@ -78,7 +77,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
passed := true
checkRestart := func(podName string, timeout time.Duration) {
err := framework.WaitForPodNameRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
@@ -124,11 +123,11 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.RunKubectlOrDieInput(secretYaml, "create", "-f", "-", nsFlag)
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)

By("checking if secret was read correctly")
_, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
})
})

@@ -142,13 +141,13 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
By("creating the pod")
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)

By("checking if name and namespace were passed correctly")
_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
})
})
})

@@ -90,7 +90,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",

@@ -31,7 +31,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -101,9 +100,9 @@ func visitManifests(cb func([]byte) error, files ...string) error {
return nil
}

// PatchItems modifies the given items in place such that each
// test gets its own instances, to avoid conflicts between different tests and
// between tests and normal deployments.
// PatchItems modifies the given items in place such that each test
// gets its own instances, to avoid conflicts between different tests
// and between tests and normal deployments.
//
// This is done by:
// - creating namespaced items inside the test's namespace
@@ -288,27 +287,18 @@ var factories = map[What]ItemFactory{
{"StorageClass"}: &storageClassFactory{},
}

// uniquifyName makes the name of some item unique per namespace by appending the
// generated unique name of the test namespace.
func (f *Framework) uniquifyName(item *string) {
// PatchName makes the name of some item unique by appending the
// generated unique name.
func (f *Framework) PatchName(item *string) {
if *item != "" {
*item = *item + "-" + f.UniqueName
}
}

// randomizeStorageClassName makes the name of the storage class unique per call
// by appending the generated unique name of the test namespace and a random 5
// character string
func (f *Framework) randomizeStorageClassName(item *string) {
if *item != "" {
*item = names.SimpleNameGenerator.GenerateName(*item + "-" + f.UniqueName + "-")
}
}

// patchNamespace moves the item into the test's namespace. Not
// PatchNamespace moves the item into the test's namespace. Not
// all items can be namespaced. For those, the name also needs to be
// patched.
func (f *Framework) patchNamespace(item *string) {
func (f *Framework) PatchNamespace(item *string) {
if f.Namespace != nil {
*item = f.Namespace.GetName()
}
@@ -317,31 +307,31 @@ func (f *Framework) patchNamespace(item *string) {
func (f *Framework) patchItemRecursively(item interface{}) error {
switch item := item.(type) {
case *rbac.Subject:
f.patchNamespace(&item.Namespace)
f.PatchNamespace(&item.Namespace)
case *rbac.RoleRef:
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
// which contains all role names that are defined cluster-wide before the test starts?
// All those names are excempt from renaming. That list could be populated by querying
// and get extended by tests.
if item.Name != "e2e-test-privileged-psp" {
f.uniquifyName(&item.Name)
f.PatchName(&item.Name)
}
case *rbac.ClusterRole:
f.uniquifyName(&item.Name)
f.PatchName(&item.Name)
case *rbac.Role:
f.patchNamespace(&item.Namespace)
f.PatchNamespace(&item.Namespace)
// Roles are namespaced, but because for RoleRef above we don't
// know whether the referenced role is a ClusterRole or Role
// and therefore always renames, we have to do the same here.
f.uniquifyName(&item.Name)
f.PatchName(&item.Name)
case *storage.StorageClass:
f.randomizeStorageClassName(&item.Name)
f.PatchName(&item.Name)
case *v1.ServiceAccount:
f.patchNamespace(&item.ObjectMeta.Namespace)
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *v1.Secret:
f.patchNamespace(&item.ObjectMeta.Namespace)
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *rbac.ClusterRoleBinding:
f.uniquifyName(&item.Name)
f.PatchName(&item.Name)
for i := range item.Subjects {
if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
@@ -351,7 +341,7 @@ func (f *Framework) patchItemRecursively(item interface{}) error {
return errors.Wrapf(err, "%T", f)
}
case *rbac.RoleBinding:
f.patchNamespace(&item.Namespace)
f.PatchNamespace(&item.Namespace)
for i := range item.Subjects {
if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
@@ -361,11 +351,11 @@ func (f *Framework) patchItemRecursively(item interface{}) error {
return errors.Wrapf(err, "%T", f)
}
case *v1.Service:
f.patchNamespace(&item.ObjectMeta.Namespace)
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *apps.StatefulSet:
f.patchNamespace(&item.ObjectMeta.Namespace)
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *apps.DaemonSet:
f.patchNamespace(&item.ObjectMeta.Namespace)
f.PatchNamespace(&item.ObjectMeta.Namespace)
default:
return errors.Errorf("missing support for patching item of type %T", item)
}

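Note: uniquifyName, randomizeStorageClassName, and patchNamespace are collapsed into the exported PatchName and PatchNamespace above. A hypothetical caller, sketched under the assumption that tests may now patch their own manifest items; the helper below is not part of the diff:

package example

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
)

// scopeToTest is a hypothetical helper: it moves a ServiceAccount into the
// test namespace and suffixes its name with the framework's UniqueName,
// mirroring what patchItemRecursively does for the types it knows about.
func scopeToTest(f *framework.Framework, sa *v1.ServiceAccount) {
	f.PatchNamespace(&sa.ObjectMeta.Namespace)
	f.PatchName(&sa.ObjectMeta.Name)
}
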
@@ -8,7 +8,7 @@ go_library(
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/networking/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",

@@ -39,7 +39,7 @@ import (

apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -186,8 +186,8 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri
fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
func() {
var pathToFail string
jig.Update(func(ing *extensions.Ingress) {
newRules := []extensions.IngressRule{}
jig.Update(func(ing *networkingv1beta1.Ingress) {
newRules := []networkingv1beta1.IngressRule{}
for _, rule := range ing.Spec.Rules {
if rule.Host != updateURLMapHost {
newRules = append(newRules, rule)
@@ -195,11 +195,11 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri
}
existingPath := rule.IngressRuleValue.HTTP.Paths[0]
pathToFail = existingPath.Path
newRules = append(newRules, extensions.IngressRule{
newRules = append(newRules, networkingv1beta1.IngressRule{
Host: updateURLMapHost,
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
IngressRuleValue: networkingv1beta1.IngressRuleValue{
HTTP: &networkingv1beta1.HTTPIngressRuleValue{
Paths: []networkingv1beta1.HTTPIngressPath{
{
Path: updateURLMapPath,
Backend: existingPath.Backend,
@@ -223,14 +223,14 @@ func CreateIngressComformanceTests(jig *TestJig, ns string, annotations map[stri
tests = append(tests, ConformanceTests{
fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
func() {
jig.Update(func(ing *extensions.Ingress) {
newRules := []extensions.IngressRule{}
jig.Update(func(ing *networkingv1beta1.Ingress) {
newRules := []networkingv1beta1.IngressRule{}
for _, rule := range ing.Spec.Rules {
if rule.Host != tlsHost {
newRules = append(newRules, rule)
continue
}
newRules = append(newRules, extensions.IngressRule{
newRules = append(newRules, networkingv1beta1.IngressRule{
Host: updatedTLSHost,
IngressRuleValue: rule.IngressRuleValue,
})
@@ -368,7 +368,7 @@ type TestJig struct {

RootCAs map[string][]byte
Address string
Ingress *extensions.Ingress
Ingress *networkingv1beta1.Ingress
// class is the value of the annotation keyed under
// `kubernetes.io/ingress.class`. It's added to all ingresses created by
// this jig.
@@ -436,9 +436,9 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri
}

// runCreate runs the required command to create the given ingress.
func (j *TestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress, error) {
func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) {
if j.Class != MulticlusterIngressClassValue {
return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(ing)
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(ing)
}
// Use kubemci to create a multicluster ingress.
filePath := framework.TestContext.OutputDir + "/mci.yaml"
@@ -450,9 +450,9 @@ func (j *TestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress, error
}

// runUpdate runs the required command to update the given ingress.
func (j *TestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress, error) {
func (j *TestJig) runUpdate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) {
if j.Class != MulticlusterIngressClassValue {
return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Update(ing)
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(ing)
}
// Use kubemci to update a multicluster ingress.
// kubemci does not have an update command. We use "create --force" to update an existing ingress.
@@ -465,11 +465,11 @@ func (j *TestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress, error
}

// Update retrieves the ingress, performs the passed function, and then updates it.
func (j *TestJig) Update(update func(ing *extensions.Ingress)) {
func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
var err error
ns, name := j.Ingress.Namespace, j.Ingress.Name
for i := 0; i < 3; i++ {
j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
framework.Failf("failed to get ingress %s/%s: %v", ns, name, err)
}
@@ -493,8 +493,8 @@ func (j *TestJig) AddHTTPS(secretName string, hosts ...string) {
_, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...)
framework.ExpectNoError(err)
j.Logger.Infof("Updating ingress %v to also use secret %v for TLS termination", j.Ingress.Name, secretName)
j.Update(func(ing *extensions.Ingress) {
ing.Spec.TLS = append(ing.Spec.TLS, extensions.IngressTLS{Hosts: hosts, SecretName: secretName})
j.Update(func(ing *networkingv1beta1.Ingress) {
ing.Spec.TLS = append(ing.Spec.TLS, networkingv1beta1.IngressTLS{Hosts: hosts, SecretName: secretName})
})
j.RootCAs[secretName] = cert
}
@@ -504,8 +504,8 @@ func (j *TestJig) SetHTTPS(secretName string, hosts ...string) {
_, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...)
framework.ExpectNoError(err)
j.Logger.Infof("Updating ingress %v to only use secret %v for TLS termination", j.Ingress.Name, secretName)
j.Update(func(ing *extensions.Ingress) {
ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
j.Update(func(ing *networkingv1beta1.Ingress) {
ing.Spec.TLS = []networkingv1beta1.IngressTLS{{Hosts: hosts, SecretName: secretName}}
})
j.RootCAs = map[string][]byte{secretName: cert}
}
@@ -513,14 +513,14 @@ func (j *TestJig) SetHTTPS(secretName string, hosts ...string) {
// RemoveHTTPS updates the ingress to not use this secret for TLS.
// Note: Does not delete the secret.
func (j *TestJig) RemoveHTTPS(secretName string) {
newTLS := []extensions.IngressTLS{}
newTLS := []networkingv1beta1.IngressTLS{}
for _, ingressTLS := range j.Ingress.Spec.TLS {
if secretName != ingressTLS.SecretName {
newTLS = append(newTLS, ingressTLS)
}
}
j.Logger.Infof("Updating ingress %v to not use secret %v for TLS termination", j.Ingress.Name, secretName)
j.Update(func(ing *extensions.Ingress) {
j.Update(func(ing *networkingv1beta1.Ingress) {
ing.Spec.TLS = newTLS
})
delete(j.RootCAs, secretName)
@@ -551,16 +551,16 @@ func (j *TestJig) TryDeleteIngress() {
j.tryDeleteGivenIngress(j.Ingress)
}

func (j *TestJig) tryDeleteGivenIngress(ing *extensions.Ingress) {
func (j *TestJig) tryDeleteGivenIngress(ing *networkingv1beta1.Ingress) {
if err := j.runDelete(ing); err != nil {
j.Logger.Infof("Error while deleting the ingress %v/%v with class %s: %v", ing.Namespace, ing.Name, j.Class, err)
}
}

// runDelete runs the required command to delete the given ingress.
func (j *TestJig) runDelete(ing *extensions.Ingress) error {
func (j *TestJig) runDelete(ing *networkingv1beta1.Ingress) error {
if j.Class != MulticlusterIngressClassValue {
return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil)
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil)
}
// Use kubemci to delete a multicluster ingress.
filePath := framework.TestContext.OutputDir + "/mci.yaml"
@@ -600,7 +600,7 @@ func getIngressAddress(client clientset.Interface, ns, name, class string) ([]st
if class == MulticlusterIngressClassValue {
return getIngressAddressFromKubemci(name)
}
ing, err := client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
ing, err := client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -635,7 +635,7 @@ func (j *TestJig) WaitForIngressAddress(c clientset.Interface, ns, ingName strin
return address, err
}

func (j *TestJig) pollIngressWithCert(ing *extensions.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error {
func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address string, knownHosts []string, cert []byte, waitForNodePort bool, timeout time.Duration) error {
// Check that all rules respond to a simple GET.
knownHostsSet := sets.NewString(knownHosts...)
for _, rules := range ing.Spec.Rules {
@@ -695,7 +695,7 @@ func (j *TestJig) WaitForIngressToStable() {
// http or https). If waitForNodePort is true, the NodePort of the Service
// is verified before verifying the Ingress. NodePort is currently a
// requirement for cloudprovider Ingress.
func (j *TestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress, waitForNodePort bool, timeout time.Duration) error {
func (j *TestJig) WaitForGivenIngressWithTimeout(ing *networkingv1beta1.Ingress, waitForNodePort bool, timeout time.Duration) error {
// Wait for the loadbalancer IP.
address, err := j.WaitForIngressAddress(j.Client, ing.Namespace, ing.Name, timeout)
if err != nil {
@@ -864,15 +864,15 @@ func (cont *NginxIngressController) Init() {
framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
}

func generateBacksideHTTPSIngressSpec(ns string) *extensions.Ingress {
return &extensions.Ingress{
func generateBacksideHTTPSIngressSpec(ns string) *networkingv1beta1.Ingress {
return &networkingv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "echoheaders-https",
Namespace: ns,
},
Spec: extensions.IngressSpec{
Spec: networkingv1beta1.IngressSpec{
// Note kubemci requires a default backend.
Backend: &extensions.IngressBackend{
Backend: &networkingv1beta1.IngressBackend{
ServiceName: "echoheaders-https",
ServicePort: intstr.IntOrString{
Type: intstr.Int,
@@ -939,7 +939,7 @@ func generateBacksideHTTPSDeploymentSpec() *apps.Deployment {
}

// SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.
func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *extensions.Ingress, error) {
func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) {
deployCreated, err := cs.AppsV1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec())
if err != nil {
return nil, nil, nil, err
@@ -963,7 +963,7 @@ func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace st
}

// DeleteTestResource deletes given deployment, service and ingress.
func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *extensions.Ingress) []error {
func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *networkingv1beta1.Ingress) []error {
var errs []error
if ing != nil {
if err := j.runDelete(ing); err != nil {

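Note: the ingress jig now goes through the networking.k8s.io/v1beta1 group instead of extensions/v1beta1. A minimal sketch of the new object shape and client call, assuming the same clientset interface the jig uses; object and service names below are placeholders:

package example

import (
	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	clientset "k8s.io/client-go/kubernetes"
)

// createMinimalIngress builds the networking/v1beta1 shape used above and
// creates it through NetworkingV1beta1 instead of ExtensionsV1beta1.
func createMinimalIngress(c clientset.Interface, ns string) (*networkingv1beta1.Ingress, error) {
	ing := &networkingv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "example-ingress", Namespace: ns},
		Spec: networkingv1beta1.IngressSpec{
			Backend: &networkingv1beta1.IngressBackend{
				ServiceName: "example-svc",
				ServicePort: intstr.FromInt(80),
			},
		},
	}
	// Same clientset, different API group accessor.
	return c.NetworkingV1beta1().Ingresses(ns).Create(ing)
}
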
@@ -17,6 +17,8 @@ limitations under the License.
package job

import (
"fmt"
"strings"
"time"

batchv1 "k8s.io/api/batch/v1"
@@ -99,22 +101,28 @@ func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Dura
})
}

// CheckForAllJobPodsRunning uses c to check in the Job named jobName in ns is running. If the returned error is not
// nil the returned bool is true if the Job is running.
func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
// EnsureAllJobPodsRunning uses c to check in the Job named jobName in ns
// is running, returning an error if the expected parallelism is not
// satisfied.
func EnsureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return false, err
return err
}
podsSummary := make([]string, 0, parallelism)
count := int32(0)
for _, p := range pods.Items {
if p.Status.Phase == v1.PodRunning {
count++
}
podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message))
}
return count == parallelism, nil
if count != parallelism {
return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
}
return nil
}

// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns

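Note: callers of the old CheckForAllJobPodsRunning had to interpret a (bool, error) pair and build their own failure message; the new EnsureAllJobPodsRunning folds the pod summary into the returned error. A sketch of a migrated call site, assuming the helper lives in the framework job package this hunk belongs to:

package example

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
)

// verifyJobParallelism fails the spec with the helper's own message if the
// Job is not running the expected number of pods.
func verifyJobParallelism(c clientset.Interface, ns, jobName string, parallelism int32) {
	err := jobutil.EnsureAllJobPodsRunning(c, ns, jobName, parallelism)
	framework.ExpectNoError(err)
}
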
@@ -65,9 +65,9 @@ const (
maxNetProxyPodsCount = 10
// SessionAffinityChecks is number of checks to hit a given set of endpoints when enable session affinity.
SessionAffinityChecks = 10
// Regex to match IPv4 addresses
// RegexIPv4 is a regex to match IPv4 addresses
RegexIPv4 = "(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)"
// Regex to match IPv6 addresses
// RegexIPv6 is a regex to match IPv6 addresses
RegexIPv6 = "(?:(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){6})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:::(?:(?:(?:[0-9a-fA-F]{1,4})):){5})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){4})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,1}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){3})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,2}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:(?:[0-9a-fA-F]{1,4})):){2})(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,3}(?:(?:[0-9a-fA-F]{1,4})))?::(?:(?:[0-9a-fA-F]{1,4})):)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,4}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9]))\\.){3}(?:(?:25[0-5]|(?:[1-9]|1[0-9]|2[0-4])?[0-9])))))))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,5}(?:(?:[0-9a-fA-F]{1,4})))?::)(?:(?:[0-9a-fA-F]{1,4})))|(?:(?:(?:(?:(?:(?:[0-9a-fA-F]{1,4})):){0,6}(?:(?:[0-9a-fA-F]{1,4})))?::))))"
)


@@ -60,7 +60,7 @@ func etcdUpgradeGCE(targetStorage, targetVersion string) error {
os.Environ(),
"TEST_ETCD_VERSION="+targetVersion,
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE=3.3.10-0")
"TEST_ETCD_IMAGE=3.3.10-1")

_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
return err
@@ -80,7 +80,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE=3.3.10-0")
"TEST_ETCD_IMAGE=3.3.10-1")
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".

@@ -76,12 +76,16 @@ const (
// Bring the cleanup timeout back down to 5m once b/33588344 is resolved.
LoadBalancerCleanupTimeout = 15 * time.Minute

// LoadBalancerPollTimeout is the time required by the loadbalancer to poll.
// On average it takes ~6 minutes for a single backend to come online in GCE.
LoadBalancerPollTimeout = 15 * time.Minute
LoadBalancerPollTimeout = 15 * time.Minute
// LoadBalancerPollInterval is the interval value in which the loadbalancer polls.
LoadBalancerPollInterval = 30 * time.Second

// LargeClusterMinNodesNumber is the number of nodes which a large cluster consists of.
LargeClusterMinNodesNumber = 100

// MaxNodesForEndpointsTests is the max number for testing endpoints.
// Don't test with more than 3 nodes.
// Many tests create an endpoint per node, in large clusters, this is
// resource and time intensive.
@@ -236,6 +240,7 @@ func (j *ServiceTestJig) CreateServiceWithServicePort(labels map[string]string,
return j.Client.CoreV1().Services(namespace).Create(service)
}

// ChangeServiceType updates the given service's ServiceType to the given newType.
func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) {
ingressIP := ""
svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) {
@@ -318,6 +323,7 @@ func (j *ServiceTestJig) CreateLoadBalancerService(namespace, serviceName string
return svc
}

// GetNodeAddresses returns a list of addresses of the given addressType for the given node
func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
for j := range node.Status.Addresses {
nodeAddress := &node.Status.Addresses[j]
@@ -328,6 +334,7 @@ func GetNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []stri
return
}

// CollectAddresses returns a list of addresses of the given addressType for the given list of nodes
func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
ips := []string{}
for i := range nodes.Items {
@@ -336,6 +343,7 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
return ips
}

// GetNodePublicIps returns a public IP list of nodes.
func GetNodePublicIps(c clientset.Interface) ([]string, error) {
nodes := GetReadySchedulableNodesOrDie(c)

@@ -347,6 +355,7 @@ func GetNodePublicIps(c clientset.Interface) ([]string, error) {
return ips, nil
}

// PickNodeIP picks one public node IP
func PickNodeIP(c clientset.Interface) string {
publicIps, err := GetNodePublicIps(c)
ExpectNoError(err)
@@ -415,6 +424,7 @@ func (j *ServiceTestJig) GetNodes(maxNodesForTest int) (nodes *v1.NodeList) {
return nodes
}

// GetNodesNames returns a list of names of the first maxNodesForTest nodes
func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string {
nodes := j.GetNodes(maxNodesForTest)
nodesNames := []string{}
@@ -424,6 +434,7 @@ func (j *ServiceTestJig) GetNodesNames(maxNodesForTest int) []string {
return nodesNames
}

// WaitForEndpointOnNode waits for a service endpoint on the given node.
func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName string) {
err := wait.PollImmediate(Poll, LoadBalancerCreateTimeoutDefault, func() (bool, error) {
endpoints, err := j.Client.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
@@ -451,6 +462,7 @@ func (j *ServiceTestJig) WaitForEndpointOnNode(namespace, serviceName, nodeName
ExpectNoError(err)
}

// SanityCheckService performs sanity checks on the given service
func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) {
if svc.Spec.Type != svcType {
Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
@@ -533,6 +545,7 @@ func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func
return svc
}

// WaitForNewIngressIPOrFail waits for the given service to get a new ingress IP, or fails after the given timeout
func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP string, timeout time.Duration) *v1.Service {
Logf("Waiting up to %v for service %q to get a new ingress IP", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have a new ingress IP", func(svc *v1.Service) bool {
@@ -548,6 +561,7 @@ func (j *ServiceTestJig) WaitForNewIngressIPOrFail(namespace, name, existingIP s
return service
}

// ChangeServiceNodePortOrFail changes node ports of the given service.
func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service {
var err error
var service *v1.Service
@@ -571,6 +585,7 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
return service
}

// WaitForLoadBalancerOrFail waits the given service to have a LoadBalancer, or fails after the given timeout
func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service {
Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
service := j.waitForConditionOrFail(namespace, name, timeout, "have a load balancer", func(svc *v1.Service) bool {
@@ -579,6 +594,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeo
return service
}

// WaitForLoadBalancerDestroyOrFail waits the given service to destroy a LoadBalancer, or fails after the given timeout
func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service {
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
defer func() {
@@ -658,6 +674,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationControll
return rc
}

// AddRCAntiAffinity adds AntiAffinity to the given ReplicationController.
func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
var replicas int32 = 2

@@ -677,6 +694,7 @@ func (j *ServiceTestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
})
}

// CreatePDBOrFail returns a PodDisruptionBudget for the given ReplicationController, or fails if a PodDisruptionBudget isn't ready
func (j *ServiceTestJig) CreatePDBOrFail(namespace string, rc *v1.ReplicationController) *policyv1beta1.PodDisruptionBudget {
pdb := j.newPDBTemplate(namespace, rc)
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
@@ -733,6 +751,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati
return result
}

// Scale scales pods to the given replicas
func (j *ServiceTestJig) Scale(namespace string, replicas int) {
rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{})
@@ -839,6 +858,7 @@ func newNetexecPodSpec(podName string, httpPort, udpPort int32, hostNetwork bool
return pod
}

// LaunchNetexecPodOnNode launches a netexec pod on the given node.
func (j *ServiceTestJig) LaunchNetexecPodOnNode(f *Framework, nodeName, podName string, httpPort, udpPort int32, hostNetwork bool) {
Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, f.Namespace.Name)
pod := newNetexecPodSpec(podName, httpPort, udpPort, hostNetwork)
@@ -887,10 +907,12 @@ func (j *ServiceTestJig) LaunchEchoserverPodOnNode(f *Framework, nodeName, podNa
Logf("Echo server pod %q in namespace %q running", pod.Name, f.Namespace.Name)
}

// TestReachableHTTP tests that the given host serves HTTP on the given port.
func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) {
j.TestReachableHTTPWithRetriableErrorCodes(host, port, []int{}, timeout)
}

// TestReachableHTTPWithRetriableErrorCodes tests that the given host serves HTTP on the given port with the given retriableErrCodes.
func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeHTTP(host, port, "/echo?msg=hello",
@@ -913,6 +935,7 @@ func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, p
}
}

// TestNotReachableHTTP tests that a HTTP request doesn't connect to the given host and port.
func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeHTTP(host, port, "/", nil)
@@ -927,6 +950,7 @@ func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout tim
}
}

// TestRejectedHTTP tests that the given host rejects a HTTP request on the given port.
func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeHTTP(host, port, "/", nil)
@@ -941,6 +965,7 @@ func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Du
}
}

// TestReachableUDP tests that the given host serves UDP on the given port.
func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeUDP(host, port, "echo hello", &UDPPokeParams{
@@ -958,6 +983,7 @@ func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Du
}
}

// TestNotReachableUDP tests that the given host doesn't serve UDP on the given port.
func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second})
@@ -971,6 +997,7 @@ func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time
}
}

// TestRejectedUDP tests that the given host rejects a UDP request on the given port.
func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Duration) {
pollfn := func() (bool, error) {
result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second})
@@ -984,6 +1011,7 @@ func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Dur
}
}

// GetHTTPContent returns the content of the given url by HTTP.
func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer {
var body bytes.Buffer
if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) {
@@ -1028,6 +1056,7 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err
return false, fmt.Errorf("unexpected HTTP response code %s from health check responder at %s", resp.Status, url)
}

// TestHTTPHealthCheckNodePort tests a HTTP connection by the given request to the given host and port.
func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, request string, timeout time.Duration, expectSucceed bool, threshold int) error {
count := 0
condition := func() (bool, error) {
@@ -1063,6 +1092,7 @@ type ServiceTestFixture struct {
Image string
}

// NewServerTest creates a new ServiceTestFixture for the tests.
func NewServerTest(client clientset.Interface, namespace string, serviceName string) *ServiceTestFixture {
t := &ServiceTestFixture{}
t.Client = client
@@ -1127,6 +1157,7 @@ func (t *ServiceTestFixture) DeleteService(serviceName string) error {
return err
}

// Cleanup cleans all ReplicationControllers and Services which this object holds.
func (t *ServiceTestFixture) Cleanup() []error {
var errs []error
for rcName := range t.rcs {
@@ -1175,6 +1206,7 @@ func (t *ServiceTestFixture) Cleanup() []error {
return errs
}

// GetIngressPoint returns a host on which ingress serves.
func GetIngressPoint(ing *v1.LoadBalancerIngress) string {
host := ing.IP
if host == "" {
@@ -1206,6 +1238,7 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update
return service, err
}

// GetContainerPortsByPodUID returns a PortsByPodUID map on the given endpoints.
func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
m := PortsByPodUID{}
for _, ss := range endpoints.Subsets {
@@ -1222,7 +1255,10 @@ func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
return m
}

// PortsByPodName maps pod name to ports.
type PortsByPodName map[string][]int

// PortsByPodUID maps UID to ports.
type PortsByPodUID map[types.UID][]int

func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
@@ -1261,6 +1297,7 @@ func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUI
}
}

// ValidateEndpointsOrFail validates that the given service exists and is served by the given expectedEndpoints.
func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) {
ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1
@@ -1348,6 +1385,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string
return podNames, serviceIP, nil
}

// StopServeHostnameService stops the given service.
func StopServeHostnameService(clientset clientset.Interface, ns, name string) error {
if err := DeleteRCAndWaitForGC(clientset, ns, name); err != nil {
return err
@@ -1439,6 +1477,7 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
return nil
}

// VerifyServeHostnameServiceDown verifies that the given service isn't served.
func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceIP string, servicePort int) error {
ipPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
// The current versions of curl included in CentOS and RHEL distros
@@ -1466,6 +1505,7 @@ func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zo
TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone)
}

// DescribeSvc logs the output of kubectl describe svc for the given namespace
func DescribeSvc(ns string) {
Logf("\nOutput of kubectl describe svc:\n")
desc, _ := RunKubectl(
@@ -1473,6 +1513,7 @@ func DescribeSvc(ns string) {
Logf(desc)
}

// CreateServiceSpec returns a Service object for testing.
func CreateServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
headlessService := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
@@ -1502,6 +1543,7 @@ func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(sv
return TestContext.CloudConfig.Provider.EnableAndDisableInternalLB()
}

// GetServiceLoadBalancerCreationTimeout returns a timeout value for creating a load balancer of a service.
func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration {
if nodes := GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > LargeClusterMinNodesNumber {
return LoadBalancerCreateTimeoutLarge

@@ -133,8 +133,10 @@ const (
// Poll is how often to Poll pods, nodes and claims.
Poll = 2 * time.Second

// PollShortTimeout is the short timeout value in polling.
PollShortTimeout = 1 * time.Minute
PollLongTimeout = 5 * time.Minute
// PollLongTimeout is the long timeout value in polling.
PollLongTimeout = 5 * time.Minute

// ServiceAccountProvisionTimeout is how long to wait for a service account to be provisioned.
// service accounts are provisioned after namespace creation
@@ -538,6 +540,23 @@ func logPodStates(pods []v1.Pod) {
Logf("") // Final empty line helps for readability.
}

// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows
// why pods crashed and since it is in the API, it's fast to retrieve.
func logPodTerminationMessages(pods []v1.Pod) {
for _, pod := range pods {
for _, status := range pod.Status.InitContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
for _, status := range pod.Status.ContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
}
}

// errorBadPodsStates create error message of basic info of bad pods for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
@@ -2039,6 +2058,11 @@ func RandomSuffix() string {
return strconv.Itoa(r.Int() % 10000)
}

// ExpectError expects an error happens, otherwise an exception raises
func ExpectError(err error, explain ...interface{}) {
gomega.Expect(err).To(gomega.HaveOccurred(), explain...)
}

// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func ExpectNoError(err error, explain ...interface{}) {
ExpectNoErrorWithOffset(1, err, explain...)
@@ -2417,6 +2441,8 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
return c.CoreV1().Events(ns).List(opts)
}, namespace)

dumpAllPodInfoForNamespace(c, namespace)

// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them are mostly impossible
@@ -2424,7 +2450,6 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
maxNodesForDump := TestContext.MaxNodesToGather
if nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}); err == nil {
if len(nodes.Items) <= maxNodesForDump {
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
} else {
Logf("skipping dumping cluster info - cluster too large")
@@ -2447,12 +2472,13 @@ func (o byFirstTimestamp) Less(i, j int) bool {
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}

func dumpAllPodInfo(c clientset.Interface) {
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
func dumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
logPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
}

func dumpAllNodeInfo(c clientset.Interface) {

@@ -16,6 +16,7 @@ go_library(
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/config:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/instrumentation/common:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/stackdriver:go_default_library",
|
||||
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/instrumentation/common:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@@ -61,7 +62,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
|
||||
// being run as the first e2e test just after the e2e cluster has been created.
|
||||
err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
|
||||
if _, err := s.Get("kibana-logging", metav1.GetOptions{}); err != nil {
|
||||
framework.Logf("Kibana is unreachable: %v", err)
|
||||
e2elog.Logf("Kibana is unreachable: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@@ -83,7 +84,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
|
||||
err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
|
||||
req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get services proxy request: %v", err)
|
||||
e2elog.Logf("Failed to get services proxy request: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -95,7 +96,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
|
||||
Name("kibana-logging").
|
||||
DoRaw()
|
||||
if err != nil {
|
||||
framework.Logf("Proxy call to kibana-logging failed: %v", err)
|
||||
e2elog.Logf("Proxy call to kibana-logging failed: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
)
|
||||
|
||||
@@ -54,7 +55,7 @@ func newEsLogProvider(f *framework.Framework) (*esLogProvider, error) {
|
||||
func (p *esLogProvider) Init() error {
|
||||
f := p.Framework
|
||||
// Check for the existence of the Elasticsearch service.
|
||||
framework.Logf("Checking the Elasticsearch service exists.")
|
||||
e2elog.Logf("Checking the Elasticsearch service exists.")
|
||||
s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
|
||||
// Make a few attempts to connect. This makes the test robust against
|
||||
// being run as the first e2e test just after the e2e cluster has been created.
|
||||
@@ -63,14 +64,14 @@ func (p *esLogProvider) Init() error {
|
||||
if _, err = s.Get("elasticsearch-logging", meta_v1.GetOptions{}); err == nil {
|
||||
break
|
||||
}
|
||||
framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
|
||||
e2elog.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for the Elasticsearch pods to enter the running state.
|
||||
framework.Logf("Checking to make sure the Elasticsearch pods are running")
|
||||
e2elog.Logf("Checking to make sure the Elasticsearch pods are running")
|
||||
labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
|
||||
options := meta_v1.ListOptions{LabelSelector: labelSelector}
|
||||
pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
|
||||
@@ -84,7 +85,7 @@ func (p *esLogProvider) Init() error {
|
}
}

framework.Logf("Checking to make sure we are talking to an Elasticsearch service.")
e2elog.Logf("Checking to make sure we are talking to an Elasticsearch service.")
// Perform a few checks to make sure this looks like an Elasticsearch cluster.
var statusCode int
err = nil
@@ -92,7 +93,7 @@ func (p *esLogProvider) Init() error {
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
}
// Query against the root URL for Elasticsearch.
@@ -103,11 +104,11 @@ func (p *esLogProvider) Init() error {
response.StatusCode(&statusCode)

if err != nil {
framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
e2elog.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
continue
}
if int(statusCode) != 200 {
framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
e2elog.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
continue
}
break
@@ -121,12 +122,12 @@ func (p *esLogProvider) Init() error {

// Now assume we really are talking to an Elasticsearch instance.
// Check the cluster health.
framework.Logf("Checking health of Elasticsearch service.")
e2elog.Logf("Checking health of Elasticsearch service.")
healthy := false
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
}
body, err = proxyRequest.Namespace(api.NamespaceSystem).
@@ -140,17 +141,17 @@ func (p *esLogProvider) Init() error {
health := make(map[string]interface{})
err := json.Unmarshal(body, &health)
if err != nil {
framework.Logf("Bad json response from elasticsearch: %v", err)
e2elog.Logf("Bad json response from elasticsearch: %v", err)
continue
}
statusIntf, ok := health["status"]
if !ok {
framework.Logf("No status field found in cluster health response: %v", health)
e2elog.Logf("No status field found in cluster health response: %v", health)
continue
}
status := statusIntf.(string)
if status != "green" && status != "yellow" {
framework.Logf("Cluster health has bad status: %v", health)
e2elog.Logf("Cluster health has bad status: %v", health)
continue
}
if err == nil && ok {
@@ -174,12 +175,12 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {

proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
if errProxy != nil {
framework.Logf("Failed to get services proxy request: %v", errProxy)
e2elog.Logf("Failed to get services proxy request: %v", errProxy)
return nil
}

query := fmt.Sprintf("kubernetes.pod_name:%s AND kubernetes.namespace_name:%s", name, f.Namespace.Name)
framework.Logf("Sending a search request to Elasticsearch with the following query: %s", query)
e2elog.Logf("Sending a search request to Elasticsearch with the following query: %s", query)

// Ask Elasticsearch to return all the log lines that were tagged with the
// pod name. Ask for ten times as many log lines because duplication is possible.
@@ -191,26 +192,26 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
Param("size", strconv.Itoa(searchPageSize)).
DoRaw()
if err != nil {
framework.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
e2elog.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
return nil
}

var response map[string]interface{}
err = json.Unmarshal(body, &response)
if err != nil {
framework.Logf("Failed to unmarshal response: %v", err)
e2elog.Logf("Failed to unmarshal response: %v", err)
return nil
}

hits, ok := response["hits"].(map[string]interface{})
if !ok {
framework.Logf("response[hits] not of the expected type: %T", response["hits"])
e2elog.Logf("response[hits] not of the expected type: %T", response["hits"])
return nil
}

h, ok := hits["hits"].([]interface{})
if !ok {
framework.Logf("Hits not of the expected type: %T", hits["hits"])
e2elog.Logf("Hits not of the expected type: %T", hits["hits"])
return nil
}

@@ -219,13 +220,13 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
for _, e := range h {
l, ok := e.(map[string]interface{})
if !ok {
framework.Logf("Element of hit not of expected type: %T", e)
e2elog.Logf("Element of hit not of expected type: %T", e)
continue
}

source, ok := l["_source"].(map[string]interface{})
if !ok {
framework.Logf("_source not of the expected type: %T", l["_source"])
e2elog.Logf("_source not of the expected type: %T", l["_source"])
continue
}

@@ -241,7 +242,7 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
continue
}

framework.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
e2elog.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
}

return entries

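Every hunk in this provider applies the same mechanical migration: framework.Logf is replaced by e2elog.Logf, where e2elog is the alias given to k8s.io/kubernetes/test/e2e/framework/log in the import hunks that follow. A minimal sketch of the resulting call shape; the retry helper and its arguments are hypothetical, only the import alias and the Logf call mirror the change above:

package logging

import (
	"time"

	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// pollUntil retries check until it succeeds or the timeout elapses, logging
// each failure through the e2elog alias (previously framework.Logf).
func pollUntil(timeout, delay time.Duration, check func() error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(delay) {
		if err := check(); err != nil {
			e2elog.Logf("After %v check failed: %v", time.Since(start), err)
			continue
		}
		return
	}
}
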
@@ -28,6 +28,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -61,14 +62,14 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
defer wg.Done()
defer ginkgo.GinkgoRecover()
wave := fmt.Sprintf("wave%v", strconv.Itoa(i))
framework.Logf("Starting logging soak, wave = %v", wave)
e2elog.Logf("Starting logging soak, wave = %v", wave)
RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
framework.Logf("Completed logging soak, wave %v", i)
e2elog.Logf("Completed logging soak, wave %v", i)
}()
// Niceness.
time.Sleep(loggingSoak.TimeBetweenWaves)
}
framework.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
e2elog.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
wg.Wait()
})
})

@@ -18,6 +18,7 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/instrumentation/common:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
|
||||
@@ -155,7 +156,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
|
||||
podName := fmt.Sprintf("synthlogger-%s", string(uuid.NewUUID()))
|
||||
err := utils.NewLoadLoggingPod(podName, "", 1, 1*time.Second).Start(f)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to create a logging pod: %v", err)
|
||||
e2elog.Logf("Failed to create a logging pod: %v", err)
|
||||
}
|
||||
return false, nil
|
||||
}, stopCh)
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
|
||||
@@ -85,7 +86,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
|
||||
// Starting one pod on each node.
|
||||
for _, pod := range podsByRun[runIdx] {
|
||||
if err := pod.Start(f); err != nil {
|
||||
framework.Logf("Failed to start pod: %v", err)
|
||||
e2elog.Logf("Failed to start pod: %v", err)
|
||||
}
|
||||
}
|
||||
<-t.C
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
|
||||
"golang.org/x/oauth2/google"
|
||||
@@ -117,7 +118,7 @@ func ensureProjectHasSinkCapacity(sinksService *sd.ProjectsSinksService, project
|
||||
return err
|
||||
}
|
||||
if len(listResponse.Sinks) >= stackdriverSinkCountLimit {
|
||||
framework.Logf("Reached Stackdriver sink limit. Deleting all sinks")
|
||||
e2elog.Logf("Reached Stackdriver sink limit. Deleting all sinks")
|
||||
deleteSinks(sinksService, projectID, listResponse.Sinks)
|
||||
}
|
||||
return nil
|
||||
@@ -136,7 +137,7 @@ func deleteSinks(sinksService *sd.ProjectsSinksService, projectID string, sinks
|
||||
for _, sink := range sinks {
|
||||
sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)
|
||||
if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
|
||||
framework.Logf("Failed to delete LogSink: %v", err)
|
||||
e2elog.Logf("Failed to delete LogSink: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -185,21 +186,21 @@ func (p *sdLogProvider) Cleanup() {
|
||||
sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, p.logSink.Name)
|
||||
sinksService := p.sdService.Projects.Sinks
|
||||
if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
|
||||
framework.Logf("Failed to delete LogSink: %v", err)
|
||||
e2elog.Logf("Failed to delete LogSink: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if p.subscription != nil {
|
||||
subsService := p.pubsubService.Projects.Subscriptions
|
||||
if _, err := subsService.Delete(p.subscription.Name).Do(); err != nil {
|
||||
framework.Logf("Failed to delete PubSub subscription: %v", err)
|
||||
e2elog.Logf("Failed to delete PubSub subscription: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if p.topic != nil {
|
||||
topicsService := p.pubsubService.Projects.Topics
|
||||
if _, err := topicsService.Delete(p.topic.Name).Do(); err != nil {
|
||||
framework.Logf("Failed to delete PubSub topic: %v", err)
|
||||
e2elog.Logf("Failed to delete PubSub topic: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -234,7 +235,7 @@ func (p *sdLogProvider) createSink(projectID, sinkName, topicName string) (*sd.L
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
framework.Logf("Using the following filter for log entries: %s", filter)
|
||||
e2elog.Logf("Using the following filter for log entries: %s", filter)
|
||||
sink := &sd.LogSink{
|
||||
Name: sinkName,
|
||||
Destination: fmt.Sprintf("pubsub.googleapis.com/%s", topicName),
|
||||
@@ -280,20 +281,20 @@ func (p *sdLogProvider) authorizeSink() error {
|
||||
}
|
||||
|
||||
func (p *sdLogProvider) waitSinkInit() error {
|
||||
framework.Logf("Waiting for log sink to become operational")
|
||||
e2elog.Logf("Waiting for log sink to become operational")
|
||||
return wait.Poll(1*time.Second, sinkStartupTimeout, func() (bool, error) {
|
||||
err := publish(p.pubsubService, p.topic, "embrace eternity")
|
||||
if err != nil {
|
||||
framework.Logf("Failed to push message to PubSub due to %v", err)
|
||||
e2elog.Logf("Failed to push message to PubSub due to %v", err)
|
||||
}
|
||||
|
||||
messages, err := pullAndAck(p.pubsubService, p.subscription)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to pull messages from PubSub due to %v", err)
|
||||
e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
|
||||
return false, nil
|
||||
}
|
||||
if len(messages) > 0 {
|
||||
framework.Logf("Sink %s is operational", p.logSink.Name)
|
||||
e2elog.Logf("Sink %s is operational", p.logSink.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -318,32 +319,32 @@ func (p *sdLogProvider) startPollingLogs() {
|
||||
func (p *sdLogProvider) pollLogsOnce() {
|
||||
messages, err := pullAndAck(p.pubsubService, p.subscription)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to pull messages from PubSub due to %v", err)
|
||||
e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, msg := range messages {
|
||||
logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
|
||||
if err != nil {
|
||||
framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
|
||||
e2elog.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
|
||||
continue
|
||||
}
|
||||
|
||||
var sdLogEntry sd.LogEntry
|
||||
if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
|
||||
framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
|
||||
e2elog.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
|
||||
continue
|
||||
}
|
||||
|
||||
name, ok := p.tryGetName(sdLogEntry)
|
||||
if !ok {
|
||||
framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
|
||||
e2elog.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
|
||||
continue
|
||||
}
|
||||
|
||||
logEntry, err := convertLogEntry(sdLogEntry)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
|
||||
e2elog.Logf("Failed to parse Stackdriver LogEntry: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -407,7 +408,7 @@ func pullAndAck(service *pubsub.Service, subs *pubsub.Subscription) ([]*pubsub.R
|
||||
if len(ids) > 0 {
|
||||
ackReq := &pubsub.AcknowledgeRequest{AckIds: ids}
|
||||
if _, err = subsService.Acknowledge(subs.Name, ackReq).Do(); err != nil {
|
||||
framework.Logf("Failed to ack poll: %v", err)
|
||||
e2elog.Logf("Failed to ack poll: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/k8s.io/utils/integer:go_default_library",
|
||||
],
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/utils/integer"
|
||||
)
|
||||
|
||||
@@ -67,13 +68,13 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max
|
||||
for _, pod := range agentPods.Items {
|
||||
contStatuses := pod.Status.ContainerStatuses
|
||||
if len(contStatuses) == 0 {
|
||||
framework.Logf("There are no container statuses for pod %s", pod.Name)
|
||||
e2elog.Logf("There are no container statuses for pod %s", pod.Name)
|
||||
continue
|
||||
}
|
||||
restartCount := int(contStatuses[0].RestartCount)
|
||||
maxRestartCount = integer.IntMax(maxRestartCount, restartCount)
|
||||
|
||||
framework.Logf("Logging agent %s on node %s was restarted %d times",
|
||||
e2elog.Logf("Logging agent %s on node %s was restarted %d times",
|
||||
pod.Name, pod.Spec.NodeName, restartCount)
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@@ -93,7 +94,7 @@ func (p *loadLoggingPod) Name() string {
|
||||
}
|
||||
|
||||
func (p *loadLoggingPod) Start(f *framework.Framework) error {
|
||||
framework.Logf("Starting load logging pod %s", p.name)
|
||||
e2elog.Logf("Starting load logging pod %s", p.name)
|
||||
f.PodClient().Create(&api_v1.Pod{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: p.name,
|
||||
@@ -168,7 +169,7 @@ func (p *execLoggingPod) Name() string {
|
||||
}
|
||||
|
||||
func (p *execLoggingPod) Start(f *framework.Framework) error {
|
||||
framework.Logf("Starting repeating logging pod %s", p.name)
|
||||
e2elog.Logf("Starting repeating logging pod %s", p.name)
|
||||
f.PodClient().Create(&api_v1.Pod{
|
||||
ObjectMeta: meta_v1.ObjectMeta{
|
||||
Name: p.name,
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
)
|
||||
|
||||
// LogChecker is an interface for an entity that can check whether logging
|
||||
@@ -195,14 +195,14 @@ func getFullIngestionTimeout(podsMap map[string]FiniteLoggingPod, slack float64)
|
||||
totalWant += want
|
||||
}
|
||||
if len(lossMsgs) > 0 {
|
||||
framework.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
|
||||
e2elog.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
|
||||
}
|
||||
lostFrac := 1 - float64(totalGot)/float64(totalWant)
|
||||
if lostFrac > slack {
|
||||
return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable",
|
||||
lostFrac*100, slack*100)
|
||||
}
|
||||
framework.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
|
||||
e2elog.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
|
||||
lostFrac*100, slack*100)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ go_library(
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/config:go_default_library",
|
||||
"//test/e2e/framework/gpu:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e/instrumentation/common:go_default_library",
|
||||
"//test/e2e/scheduling:go_default_library",
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/gpu"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
"k8s.io/kubernetes/test/e2e/scheduling"
|
||||
"k8s.io/kubernetes/test/utils/image"
|
||||
@@ -101,7 +102,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
|
||||
pollingFunction := checkForAcceleratorMetrics(projectID, gcmService, time.Now(), metricsMap)
|
||||
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
|
||||
if err != nil {
|
||||
framework.Logf("Missing metrics: %+v", metricsMap)
|
||||
e2elog.Logf("Missing metrics: %+v", metricsMap)
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
@@ -119,9 +120,9 @@ func checkForAcceleratorMetrics(projectID string, gcmService *gcm.Service, start
|
||||
if len(ts) > 0 {
|
||||
counter = counter + 1
|
||||
metricsMap[metric] = true
|
||||
framework.Logf("Received %v timeseries for metric %v", len(ts), metric)
|
||||
e2elog.Logf("Received %v timeseries for metric %v", len(ts), metric)
|
||||
} else {
|
||||
framework.Logf("No timeseries for metric %v", metric)
|
||||
e2elog.Logf("No timeseries for metric %v", metric)
|
||||
}
|
||||
}
|
||||
if counter < 3 {
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/config"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@@ -71,7 +72,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
|
||||
if maxRetries--; maxRetries <= 0 {
|
||||
break
|
||||
}
|
||||
framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
|
||||
e2elog.Logf("failed to retrieve kubelet stats -\n %v", errors)
|
||||
time.Sleep(cadvisor.SleepDuration)
|
||||
}
|
||||
framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
rbac "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -264,20 +265,20 @@ func CreateAdapter(adapterDeploymentFile string) error {
|
||||
return err
|
||||
}
|
||||
stat, err := framework.RunKubectl("create", "-f", adapterURL)
|
||||
framework.Logf(stat)
|
||||
e2elog.Logf(stat)
|
||||
return err
|
||||
}
|
||||
|
||||
func createClusterAdminBinding() error {
|
||||
stdout, stderr, err := framework.RunCmd("gcloud", "config", "get-value", "core/account")
|
||||
if err != nil {
|
||||
framework.Logf(stderr)
|
||||
e2elog.Logf(stderr)
|
||||
return err
|
||||
}
|
||||
serviceAccount := strings.TrimSpace(stdout)
|
||||
framework.Logf("current service account: %q", serviceAccount)
|
||||
e2elog.Logf("current service account: %q", serviceAccount)
|
||||
stat, err := framework.RunKubectl("create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
|
||||
framework.Logf(stat)
|
||||
e2elog.Logf(stat)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -306,32 +307,32 @@ func CreateDescriptors(service *gcm.Service, projectID string) error {
|
||||
func CleanupDescriptors(service *gcm.Service, projectID string) {
|
||||
_, err := service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, CustomMetricName)).Do()
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
|
||||
e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
|
||||
}
|
||||
_, err = service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, UnusedMetricName)).Do()
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
|
||||
e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments.
|
||||
func CleanupAdapter(adapterDeploymentFile string) {
|
||||
stat, err := framework.RunKubectl("delete", "-f", adapterDeploymentFile)
|
||||
framework.Logf(stat)
|
||||
e2elog.Logf(stat)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete adapter deployments: %s", err)
|
||||
e2elog.Logf("Failed to delete adapter deployments: %s", err)
|
||||
}
|
||||
err = exec.Command("rm", adapterDeploymentFile).Run()
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete adapter deployment file: %s", err)
|
||||
e2elog.Logf("Failed to delete adapter deployment file: %s", err)
|
||||
}
|
||||
cleanupClusterAdminBinding()
|
||||
}
|
||||
|
||||
func cleanupClusterAdminBinding() {
|
||||
stat, err := framework.RunKubectl("delete", "clusterrolebinding", ClusterAdminBinding)
|
||||
framework.Logf(stat)
|
||||
e2elog.Logf(stat)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete cluster admin binding: %s", err)
|
||||
e2elog.Logf("Failed to delete cluster admin binding: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ import (
|
||||
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
|
||||
"k8s.io/client-go/restmapper"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
customclient "k8s.io/metrics/pkg/client/custom_metrics"
|
||||
externalclient "k8s.io/metrics/pkg/client/external_metrics"
|
||||
)
|
||||
@@ -257,11 +258,11 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric
|
||||
func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) {
|
||||
err := cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod1, &metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err)
|
||||
e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err)
|
||||
}
|
||||
err = cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod2, &metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err)
|
||||
e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
|
||||
@@ -71,7 +72,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
|
||||
}
|
||||
}
|
||||
if !masterRegistered {
|
||||
framework.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.")
|
||||
e2elog.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.")
|
||||
return
|
||||
}
|
||||
response, err := grabber.GrabFromScheduler()
|
||||
@@ -92,7 +93,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
|
||||
}
|
||||
}
|
||||
if !masterRegistered {
|
||||
framework.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.")
|
||||
e2elog.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.")
|
||||
return
|
||||
}
|
||||
response, err := grabber.GrabFromControllerManager()
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
)
|
||||
|
||||
@@ -171,7 +172,7 @@ func validateQueryReturnsCorrectValues(c clientset.Interface, query string, expe
|
||||
if len(samples) < minSamplesCount {
|
||||
return fmt.Errorf("Not enough samples for query '%v', got %v", query, samples)
|
||||
}
|
||||
framework.Logf("Executed query '%v' returned %v", query, samples)
|
||||
e2elog.Logf("Executed query '%v' returned %v", query, samples)
|
||||
for _, value := range samples {
|
||||
error := math.Abs(value-expectedValue) / expectedValue
|
||||
if error >= errorTolerance {
|
||||
@@ -238,7 +239,7 @@ func fetchPrometheusTargetDiscovery(c clientset.Interface) (TargetDiscovery, err
|
||||
Raw()
|
||||
var qres promTargetsResponse
|
||||
if err != nil {
|
||||
framework.Logf(string(response))
|
||||
e2elog.Logf(string(response))
|
||||
return qres.Data, err
|
||||
}
|
||||
err = json.Unmarshal(response, &qres)
|
||||
@@ -303,7 +304,7 @@ func queryPrometheus(c clientset.Interface, query string, start, end time.Time,
|
||||
Do().
|
||||
Raw()
|
||||
if err != nil {
|
||||
framework.Logf(string(response))
|
||||
e2elog.Logf(string(response))
|
||||
return nil, err
|
||||
}
|
||||
var qres promQueryResponse
|
||||
@@ -369,7 +370,7 @@ func retryUntilSucceeds(validator func() error, timeout time.Duration) {
|
||||
if time.Since(startTime) >= timeout {
|
||||
break
|
||||
}
|
||||
framework.Logf(err.Error())
|
||||
e2elog.Logf(err.Error())
|
||||
time.Sleep(prometheusSleepBetweenAttempts)
|
||||
}
|
||||
framework.Failf(err.Error())
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
@@ -83,7 +84,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
|
||||
// and uncomment following lines (comment out the two lines above): (DON'T set the env var below)
|
||||
/*
|
||||
ts, err := google.DefaultTokenSource(oauth2.NoContext)
|
||||
framework.Logf("Couldn't get application default credentials, %v", err)
|
||||
e2elog.Logf("Couldn't get application default credentials, %v", err)
|
||||
if err != nil {
|
||||
framework.Failf("Error accessing application default credentials, %v", err)
|
||||
}
|
||||
@@ -110,7 +111,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
|
||||
pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
|
||||
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
|
||||
if err != nil {
|
||||
framework.Logf("Missing metrics: %+v\n", metricsMap)
|
||||
e2elog.Logf("Missing metrics: %+v\n", metricsMap)
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
@@ -129,9 +130,9 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time,
|
||||
if len(ts) > 0 {
|
||||
counter = counter + 1
|
||||
metricsMap[metric] = true
|
||||
framework.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
|
||||
e2elog.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
|
||||
} else {
|
||||
framework.Logf("No timeseries for metric %v\n", metric)
|
||||
e2elog.Logf("No timeseries for metric %v\n", metric)
|
||||
}
|
||||
|
||||
var sum float64
|
||||
@@ -148,10 +149,10 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time,
|
||||
}
|
||||
}
|
||||
sum = sum + *max.Value.DoubleValue
|
||||
framework.Logf("Received %v points for metric %v\n",
|
||||
e2elog.Logf("Received %v points for metric %v\n",
|
||||
len(t.Points), metric)
|
||||
}
|
||||
framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
|
||||
e2elog.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
|
||||
if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -711,7 +711,7 @@ metadata:
ginkgo.By("trying to use kubectl with invalid token")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
e2elog.Logf("got err %v", err)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
gomega.Expect(err).To(gomega.ContainSubstring("Authorization: Bearer invalid"))
@@ -720,7 +720,7 @@ metadata:
ginkgo.By("trying to use kubectl with invalid server")
_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
e2elog.Logf("got err %v", err)
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)
gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server"))
gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api"))

@@ -945,9 +945,6 @@ metadata:
Description: Deploy a redis controller and a redis service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information.
*/
framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods ", func() {
kv, err := framework.KubectlVersion()
framework.ExpectNoError(err)
framework.SkipUnlessServerVersionGTE(kv, c.Discovery())
controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename)))
serviceJSON := readTestFileOrDie(redisServiceFilename)

@@ -1717,7 +1714,7 @@ metadata:

ginkgo.By("verifying the job " + jobName + " was deleted")
_, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
gomega.Expect(err).To(gomega.HaveOccurred())
framework.ExpectError(err)
gomega.Expect(apierrs.IsNotFound(err)).To(gomega.BeTrue())
})
})

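The kubectl hunks above swap raw gomega matchers for the framework assertion helpers: gomega.Expect(err).To(gomega.HaveOccurred()) becomes framework.ExpectError(err). A rough sketch of the new shape, with a hypothetical command runner standing in for the real test body:

package kubectl

import "k8s.io/kubernetes/test/e2e/framework"

// expectCommandFails runs a command that must fail and asserts the failure
// through the framework helper instead of a bare gomega matcher.
func expectCommandFails(run func() error) {
	err := run()
	framework.ExpectError(err)
}
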
@@ -346,7 +346,7 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
|
||||
|
||||
func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
|
||||
config, err := framework.LoadConfig()
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
|
||||
framework.ExpectNoError(err, "unable to get base config")
|
||||
|
||||
ginkgo.By("Creating the pod")
|
||||
pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
|
||||
|
||||
@@ -227,7 +227,7 @@ var _ = SIGDescribe("Addon update", func() {
|
||||
|
||||
var err error
|
||||
sshClient, err = getMasterSSHClient()
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get the master SSH client.")
|
||||
framework.ExpectNoError(err, "Failed to get the master SSH client.")
|
||||
})
|
||||
|
||||
ginkgo.AfterEach(func() {
|
||||
@@ -275,7 +275,7 @@ var _ = SIGDescribe("Addon update", func() {
|
||||
|
||||
for _, p := range remoteFiles {
|
||||
err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
|
||||
framework.ExpectNoError(err, "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
|
||||
}
|
||||
|
||||
// directory on kubernetes-master
|
||||
@@ -284,7 +284,7 @@ var _ = SIGDescribe("Addon update", func() {
|
||||
|
||||
// cleanup from previous tests
|
||||
_, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
|
||||
framework.ExpectNoError(err, "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
|
||||
|
||||
defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
|
||||
sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))
|
||||
@@ -300,7 +300,8 @@ var _ = SIGDescribe("Addon update", func() {
|
||||
// Delete the "ensure exist class" addon at the end.
|
||||
defer func() {
|
||||
e2elog.Logf("Cleaning up ensure exist class addon.")
|
||||
gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred())
|
||||
err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
|
||||
framework.ExpectNoError(err)
|
||||
}()
|
||||
|
||||
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true)
|
||||
@@ -334,7 +335,7 @@ var _ = SIGDescribe("Addon update", func() {
|
||||
|
||||
ginkgo.By("verify invalid addons weren't created")
|
||||
_, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{})
|
||||
gomega.Expect(err).To(gomega.HaveOccurred())
|
||||
framework.ExpectError(err)
|
||||
|
||||
// Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function.
|
||||
})
|
||||
@@ -386,7 +387,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
|
||||
|
||||
func sshExecAndVerify(client *ssh.Client, cmd string) {
|
||||
_, _, rc, err := sshExec(client, cmd)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client)
|
||||
framework.ExpectNoError(err, "Failed to execute %q with ssh client %+v", cmd, client)
|
||||
gomega.Expect(rc).To(gomega.Equal(0), "error return code from executing command on the cluster: %s", cmd)
|
||||
}
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
|
||||
if len(secretNeedClean) > 0 {
|
||||
By("delete the bootstrap token secret")
|
||||
err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
secretNeedClean = ""
|
||||
}
|
||||
})
|
||||
@@ -52,22 +52,22 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
|
||||
It("should sign the new added bootstrap tokens", func() {
|
||||
By("create a new bootstrap token secret")
|
||||
tokenId, err := GenerateTokenId()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
secret := newTokenSecret(tokenId, "tokenSecret")
|
||||
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
|
||||
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("wait for the bootstrap token secret be signed")
|
||||
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func() {
|
||||
By("create a new bootstrap token secret")
|
||||
tokenId, err := GenerateTokenId()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
secret := newTokenSecret(tokenId, "tokenSecret")
|
||||
secret, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
|
||||
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
|
||||
@@ -76,49 +76,49 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
|
||||
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
|
||||
|
||||
cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenId]
|
||||
Expect(ok).Should(Equal(true))
|
||||
|
||||
By("update the cluster-info ConfigMap")
|
||||
originalData := cfgMap.Data[bootstrapapi.KubeConfigKey]
|
||||
updatedKubeConfig, err := randBytes(20)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
cfgMap.Data[bootstrapapi.KubeConfigKey] = updatedKubeConfig
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
By("update back the cluster-info ConfigMap")
|
||||
cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
cfgMap.Data[bootstrapapi.KubeConfigKey] = originalData
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}()
|
||||
|
||||
By("wait for signed bootstrap token updated")
|
||||
err = WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c, tokenId, signedToken)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
It("should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted", func() {
|
||||
By("create a new bootstrap token secret")
|
||||
tokenId, err := GenerateTokenId()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
secret := newTokenSecret(tokenId, "tokenSecret")
|
||||
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("wait for the bootstrap secret be signed")
|
||||
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("delete the bootstrap token secret")
|
||||
err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(bootstrapapi.BootstrapTokenSecretPrefix+tokenId, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("wait for the bootstrap token removed from cluster-info ConfigMap")
|
||||
err = WaitForSignedClusterInfoByBootstrapTokenToDisappear(c, tokenId)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -20,7 +20,6 @@ import (
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -45,41 +44,41 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
By("delete the bootstrap token secret")
err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{})
secretNeedClean = ""
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
}
})
It("should delete the token secret when the secret expired", func() {
By("create a new expired bootstrap token secret")
tokenId, err := GenerateTokenId()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
tokenSecret, err := GenerateTokenSecret()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)

secret := newTokenSecret(tokenId, tokenSecret)
addSecretExpiration(secret, TimeStringFromNow(-time.Hour))
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)

Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)

By("wait for the bootstrap token secret be deleted")
err = WaitForBootstrapTokenSecretToDisappear(c, tokenId)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
})

It("should not delete the token secret when the secret is not expired", func() {
By("create a new expired bootstrap token secret")
tokenId, err := GenerateTokenId()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
tokenSecret, err := GenerateTokenSecret()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
secret := newTokenSecret(tokenId, tokenSecret)
addSecretExpiration(secret, TimeStringFromNow(time.Hour))
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)

By("wait for the bootstrap token secret not be deleted")
err = WaitForBootstrapTokenSecretNotDisappear(c, tokenId, 20*time.Second)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
})
})

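The bootstrap-token files apply the complementary helper on the success path: Expect(err).NotTo(HaveOccurred()) becomes framework.ExpectNoError(err). Sketched minimally below; the closure is hypothetical and only the assertion call reflects the change shown above:

package lifecycle

import "k8s.io/kubernetes/test/e2e/framework"

// createAndCheck runs a setup step and fails the test through
// framework.ExpectNoError rather than a dot-imported gomega matcher.
func createAndCheck(create func() error) {
	err := create()
	framework.ExpectNoError(err, "setup step was expected to succeed")
}
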
@@ -46,7 +46,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
|
||||
// make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
|
||||
ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
|
||||
result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
var statusCode int
|
||||
result.StatusCode(&statusCode)
|
||||
@@ -54,7 +54,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
|
||||
})
|
||||
ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
|
||||
result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
var statusCode int
|
||||
result.StatusCode(&statusCode)
|
||||
|
||||
@@ -35,7 +35,6 @@ import (
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -70,7 +69,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
|
||||
namespaceName := metav1.NamespaceSystem
|
||||
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
|
||||
events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
for _, e := range events.Items {
|
||||
e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
|
||||
|
||||
@@ -27,7 +27,6 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
|
||||
@@ -51,7 +50,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
systemPodsNo = int32(len(systemPods))
|
||||
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
|
||||
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
|
||||
@@ -104,7 +103,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
|
||||
// the cluster is restored to health.
|
||||
ginkgo.By("waiting for system pods to successfully restart")
|
||||
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
ginkgo.It("should be able to delete nodes", func() {
|
||||
@@ -112,20 +111,20 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
|
||||
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
|
||||
name := "my-hostname-delete-node"
|
||||
numNodes, err := framework.NumberOfRegisteredNodes(c)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
originalNodeCount = int32(numNodes)
|
||||
common.NewRCByName(c, ns, name, originalNodeCount, nil)
|
||||
err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
|
||||
ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
|
||||
err = framework.ResizeGroup(group, targetNumNodes)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = framework.WaitForGroupSize(group, targetNumNodes)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
|
||||
"the now non-existent node and the RC to recreate it")
|
||||
@@ -133,7 +132,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
|
||||
|
||||
ginkgo.By("verifying whether the pods from the removed node are recreated")
|
||||
err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// TODO: Bug here - testName is not correct
|
||||
@@ -143,26 +142,26 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
|
||||
name := "my-hostname-add-node"
|
||||
common.NewSVCByName(c, ns, name)
|
||||
numNodes, err := framework.NumberOfRegisteredNodes(c)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
originalNodeCount = int32(numNodes)
|
||||
common.NewRCByName(c, ns, name, originalNodeCount, nil)
|
||||
err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
|
||||
ginkgo.By(fmt.Sprintf("increasing cluster size to %d", targetNumNodes))
|
||||
err = framework.ResizeGroup(group, targetNumNodes)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = framework.WaitForGroupSize(group, targetNumNodes)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
|
||||
err = resizeRC(c, ns, name, originalNodeCount+1)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@@ -29,7 +29,6 @@ import (
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func nodeNames(nodes []v1.Node) []string {
|
||||
@@ -54,14 +53,14 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
var err error
|
||||
ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
systemNamespace = metav1.NamespaceSystem
|
||||
|
||||
ginkgo.By("ensuring all nodes are ready")
|
||||
originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
|
||||
|
||||
ginkgo.By("ensuring all pods are running and ready")
|
||||
@@ -87,11 +86,11 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
|
||||
ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func() {
|
||||
ginkgo.By("restarting all of the nodes")
|
||||
err := common.RestartNodes(f.ClientSet, originalNodes)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("ensuring all nodes are ready after the restart")
|
||||
nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
|
||||
|
||||
// Make sure that we have the same number of nodes. We're not checking
|
||||
@@ -108,7 +107,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
|
||||
ginkgo.By("ensuring the same number of pods are running and ready after restart")
|
||||
podCheckStart := time.Now()
|
||||
podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
|
||||
if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
|
||||
pods := ps.List()
|
||||
|
||||
@@ -13,7 +13,7 @@ go_library(
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/networking/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -41,5 +41,5 @@ go_test(
name = "go_default_test",
srcs = ["manifest_test.go"],
embed = [":go_default_library"],
deps = ["//staging/src/k8s.io/api/extensions/v1beta1:go_default_library"],
deps = ["//staging/src/k8s.io/api/networking/v1beta1:go_default_library"],
)

@@ -22,7 +22,7 @@ import (

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -87,8 +87,8 @@ func SvcFromManifest(fileName string) (*v1.Service, error) {
}

// IngressFromManifest reads a .json/yaml file and returns the ingress in it.
func IngressFromManifest(fileName string) (*extensions.Ingress, error) {
var ing extensions.Ingress
func IngressFromManifest(fileName string) (*networkingv1beta1.Ingress, error) {
var ing networkingv1beta1.Ingress
data, err := testfiles.Read(fileName)
if err != nil {
return nil, err
@@ -106,8 +106,8 @@ func IngressFromManifest(fileName string) (*extensions.Ingress, error) {

// IngressToManifest generates a yaml file in the given path with the given ingress.
// Assumes that a directory exists at the given path.
func IngressToManifest(ing *extensions.Ingress, path string) error {
serialized, err := marshalToYaml(ing, extensions.SchemeGroupVersion)
func IngressToManifest(ing *networkingv1beta1.Ingress, path string) error {
serialized, err := marshalToYaml(ing, networkingv1beta1.SchemeGroupVersion)
if err != nil {
return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err)
}

@@ -22,11 +22,11 @@ import (
"path/filepath"
"testing"

extensions "k8s.io/api/extensions/v1beta1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func TestIngressToManifest(t *testing.T) {
ing := &extensions.Ingress{}
ing := &networkingv1beta1.Ingress{}
// Create a temp dir.
tmpDir, err := ioutil.TempDir("", "kubemci")
if err != nil {

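manifest.go and its test move Ingress handling from the extensions/v1beta1 API group to networking.k8s.io/v1beta1; only the import alias and the type and SchemeGroupVersion references change. A small sketch of constructing an Ingress under the new group, with a made-up object name:

package manifest

import (
	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// emptyIngress returns a zero-valued Ingress in the networking group, the
// same shape the round-trip test above serializes to YAML.
func emptyIngress() *networkingv1beta1.Ingress {
	return &networkingv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "kubemci-example"},
	}
}
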
@@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

const dnsTestPodHostName = "dns-querier-1"
@@ -60,16 +60,17 @@ var _ = SIGDescribe("DNS", func() {
}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
ginkgo.By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})

It("should resolve DNS of partial qualified names for the cluster ", func() {
// [LinuxOnly]: As Windows currently does not support resolving PQDNs.
ginkgo.It("should resolve DNS of partial qualified names for the cluster [LinuxOnly]", func() {
// All the names we need to be able to resolve.
// TODO: Spin up a separate test service and test that dns works for that service.
namesToResolve := []string{
@@ -89,11 +90,11 @@ var _ = SIGDescribe("DNS", func() {
hostEntries := []string{hostFQDN, dnsTestPodHostName}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")

// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
ginkgo.By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})
@@ -108,11 +109,11 @@ var _ = SIGDescribe("DNS", func() {
hostEntries := []string{hostFQDN, dnsTestPodHostName}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")

// Run a pod which probes /etc/hosts and exposes the results by HTTP.
By("creating a pod to probe /etc/hosts")
ginkgo.By("creating a pod to probe /etc/hosts")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})
@@ -125,27 +126,27 @@ var _ = SIGDescribe("DNS", func() {
|
||||
framework.ConformanceIt("should provide DNS for services ", func() {
|
||||
// NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below
|
||||
// Create a test headless service.
|
||||
By("Creating a test headless service")
|
||||
ginkgo.By("Creating a test headless service")
|
||||
testServiceSelector := map[string]string{
|
||||
"dns-test": "true",
|
||||
}
|
||||
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
|
||||
defer func() {
|
||||
By("deleting the test headless service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test headless service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
|
||||
}()
|
||||
|
||||
regularServiceName := "test-service-2"
|
||||
regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
|
||||
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName)
|
||||
|
||||
defer func() {
|
||||
By("deleting the test service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil)
|
||||
}()
|
||||
|
||||
@@ -160,39 +161,40 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
ginkgo.By("creating a pod to probe DNS")
|
||||
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
pod.ObjectMeta.Labels = testServiceSelector
|
||||
|
||||
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
|
||||
})
|
||||
|
||||
It("should resolve DNS of partial qualified names for services ", func() {
|
||||
// [LinuxOnly]: As Windows currently does not support resolving PQDNs.
|
||||
ginkgo.It("should resolve DNS of partial qualified names for services [LinuxOnly]", func() {
|
||||
// Create a test headless service.
|
||||
By("Creating a test headless service")
|
||||
ginkgo.By("Creating a test headless service")
|
||||
testServiceSelector := map[string]string{
|
||||
"dns-test": "true",
|
||||
}
|
||||
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
|
||||
defer func() {
|
||||
By("deleting the test headless service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test headless service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
|
||||
}()
|
||||
|
||||
regularServiceName := "test-service-2"
|
||||
regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
|
||||
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName)
|
||||
defer func() {
|
||||
By("deleting the test service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil)
|
||||
}()
|
||||
|
||||
@@ -209,20 +211,20 @@ var _ = SIGDescribe("DNS", func() {
|
||||
|
||||
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
ginkgo.By("creating a pod to probe DNS")
|
||||
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
pod.ObjectMeta.Labels = testServiceSelector
|
||||
|
||||
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
|
||||
})
|
||||
|
||||
It("should provide DNS for pods for Hostname [LinuxOnly]", func() {
|
||||
ginkgo.It("should provide DNS for pods for Hostname [LinuxOnly]", func() {
|
||||
// Create a test headless service.
|
||||
By("Creating a test headless service")
|
||||
ginkgo.By("Creating a test headless service")
|
||||
testServiceSelector := map[string]string{
|
||||
"dns-test-hostname-attribute": "true",
|
||||
}
|
||||
@@ -230,11 +232,11 @@ var _ = SIGDescribe("DNS", func() {
|
||||
podHostname := "dns-querier-2"
|
||||
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName)
|
||||
|
||||
defer func() {
|
||||
By("deleting the test headless service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test headless service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
|
||||
}()
|
||||
|
||||
@@ -242,11 +244,11 @@ var _ = SIGDescribe("DNS", func() {
|
||||
hostNames := []string{hostFQDN, podHostname}
|
||||
wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostNames, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostNames, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
ginkgo.By("creating a pod to probe DNS")
|
||||
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
pod1.ObjectMeta.Labels = testServiceSelector
|
||||
pod1.Spec.Hostname = podHostname
|
||||
@@ -255,9 +257,9 @@ var _ = SIGDescribe("DNS", func() {
|
||||
validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...))
|
||||
})
|
||||
|
||||
It("should provide DNS for pods for Subdomain", func() {
|
||||
ginkgo.It("should provide DNS for pods for Subdomain", func() {
|
||||
// Create a test headless service.
|
||||
By("Creating a test headless service")
|
||||
ginkgo.By("Creating a test headless service")
|
||||
testServiceSelector := map[string]string{
|
||||
"dns-test-hostname-attribute": "true",
|
||||
}
|
||||
@@ -265,23 +267,24 @@ var _ = SIGDescribe("DNS", func() {
|
||||
podHostname := "dns-querier-2"
|
||||
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName)
|
||||
|
||||
defer func() {
|
||||
By("deleting the test headless service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test headless service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
|
||||
}()
|
||||
|
||||
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.%s", podHostname, serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
namesToResolve := []string{hostFQDN}
|
||||
subdomain := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
namesToResolve := []string{hostFQDN, subdomain}
|
||||
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
ginkgo.By("creating a pod to probe DNS")
|
||||
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
pod1.ObjectMeta.Labels = testServiceSelector
|
||||
pod1.Spec.Hostname = podHostname
|
||||
@@ -298,72 +301,72 @@ var _ = SIGDescribe("DNS", func() {
|
||||
*/
|
||||
framework.ConformanceIt("should provide DNS for ExternalName services", func() {
|
||||
// Create a test ExternalName service.
|
||||
By("Creating a test externalName service")
|
||||
ginkgo.By("Creating a test externalName service")
|
||||
serviceName := "dns-test-service-3"
|
||||
externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create ExternalName service: %s", serviceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ExternalName service: %s", serviceName)
|
||||
|
||||
defer func() {
|
||||
By("deleting the test externalName service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test externalName service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
|
||||
}()
|
||||
hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
|
||||
wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
|
||||
jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
|
||||
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a pod to probe DNS")
|
||||
ginkgo.By("creating a pod to probe DNS")
|
||||
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
|
||||
validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.")
|
||||
|
||||
// Test changing the externalName field
|
||||
By("changing the externalName to bar.example.com")
|
||||
ginkgo.By("changing the externalName to bar.example.com")
|
||||
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
|
||||
s.Spec.ExternalName = "bar.example.com"
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to change externalName of service: %s", serviceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change externalName of service: %s", serviceName)
|
||||
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
|
||||
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
|
||||
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a second pod to probe DNS")
|
||||
ginkgo.By("creating a second pod to probe DNS")
|
||||
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
|
||||
validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.")
|
||||
|
||||
// Test changing type from ExternalName to ClusterIP
|
||||
By("changing the service to type=ClusterIP")
|
||||
ginkgo.By("changing the service to type=ClusterIP")
|
||||
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
|
||||
s.Spec.Type = v1.ServiceTypeClusterIP
|
||||
s.Spec.Ports = []v1.ServicePort{
|
||||
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
|
||||
}
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName)
|
||||
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
|
||||
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
|
||||
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
|
||||
ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
|
||||
|
||||
// Run a pod which probes DNS and exposes the results by HTTP.
|
||||
By("creating a third pod to probe DNS")
|
||||
ginkgo.By("creating a third pod to probe DNS")
|
||||
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
|
||||
|
||||
svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get service: %s", externalNameService.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get service: %s", externalNameService.Name)
|
||||
|
||||
validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
|
||||
})
|
||||
|
||||
It("should support configurable pod DNS nameservers", func() {
|
||||
By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
|
||||
ginkgo.It("should support configurable pod DNS nameservers", func() {
|
||||
ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
|
||||
testServerIP := "1.1.1.1"
|
||||
testSearchPath := "resolv.conf.local"
|
||||
testAgnhostPod := f.NewAgnhostPod(f.Namespace.Name, "pause")
|
||||
@@ -373,15 +376,15 @@ var _ = SIGDescribe("DNS", func() {
|
||||
Searches: []string{testSearchPath},
|
||||
}
|
||||
testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name)
|
||||
framework.Logf("Created pod %v", testAgnhostPod)
|
||||
defer func() {
|
||||
framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
|
||||
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Failf("Failed to delete pod %s: %v", testAgnhostPod.Name, err)
|
||||
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
|
||||
}
|
||||
}()
|
||||
Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name)
|
||||
gomega.Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name)
|
||||
|
||||
runCommand := func(arg string) string {
|
||||
cmd := []string{"/agnhost", arg}
|
||||
@@ -393,25 +396,25 @@ var _ = SIGDescribe("DNS", func() {
|
||||
CaptureStdout: true,
|
||||
CaptureStderr: true,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err)
|
||||
return stdout
|
||||
}
|
||||
|
||||
By("Verifying customized DNS suffix list is configured on pod...")
|
||||
ginkgo.By("Verifying customized DNS suffix list is configured on pod...")
|
||||
stdout := runCommand("dns-suffix")
|
||||
if !strings.Contains(stdout, testSearchPath) {
|
||||
framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout)
|
||||
}
|
||||
|
||||
By("Verifying customized DNS server is configured on pod...")
|
||||
ginkgo.By("Verifying customized DNS server is configured on pod...")
|
||||
stdout = runCommand("dns-server-list")
|
||||
if !strings.Contains(stdout, testServerIP) {
|
||||
framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout)
|
||||
}
|
||||
})
|
||||
|
||||
It("should support configurable pod resolv.conf", func() {
|
||||
By("Preparing a test DNS service with injected DNS names...")
|
||||
ginkgo.It("should support configurable pod resolv.conf", func() {
|
||||
ginkgo.By("Preparing a test DNS service with injected DNS names...")
|
||||
testInjectedIP := "1.1.1.1"
|
||||
testDNSNameShort := "notexistname"
|
||||
testSearchPath := "resolv.conf.local"
|
||||
@@ -421,23 +424,23 @@ var _ = SIGDescribe("DNS", func() {
|
||||
testDNSNameFull: testInjectedIP,
|
||||
})
|
||||
testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testServerPod.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testServerPod.Name)
|
||||
e2elog.Logf("Created pod %v", testServerPod)
|
||||
defer func() {
|
||||
e2elog.Logf("Deleting pod %s...", testServerPod.Name)
|
||||
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Failf("Failed to delete pod %s: %v", testServerPod.Name, err)
|
||||
framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
|
||||
}
|
||||
}()
|
||||
Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name)
|
||||
gomega.Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name)
|
||||
|
||||
// Retrieve server pod IP.
|
||||
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get pod %v", testServerPod.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %v", testServerPod.Name)
|
||||
testServerIP := testServerPod.Status.PodIP
|
||||
e2elog.Logf("testServerIP is %s", testServerIP)
|
||||
|
||||
By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
|
||||
ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
|
||||
testUtilsPod := generateDNSUtilsPod()
|
||||
testUtilsPod.Spec.DNSPolicy = v1.DNSNone
|
||||
testNdotsValue := "2"
|
||||
@@ -452,17 +455,17 @@ var _ = SIGDescribe("DNS", func() {
|
||||
},
|
||||
}
|
||||
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name)
|
||||
e2elog.Logf("Created pod %v", testUtilsPod)
|
||||
defer func() {
|
||||
e2elog.Logf("Deleting pod %s...", testUtilsPod.Name)
|
||||
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
|
||||
framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
|
||||
framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
|
||||
}
|
||||
}()
|
||||
Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name)
|
||||
gomega.Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name)
|
||||
|
||||
By("Verifying customized DNS option is configured on pod...")
|
||||
ginkgo.By("Verifying customized DNS option is configured on pod...")
|
||||
// TODO: Figure out a better way other than checking the actual resolv,conf file.
|
||||
cmd := []string{"cat", "/etc/resolv.conf"}
|
||||
stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
|
||||
@@ -473,12 +476,12 @@ var _ = SIGDescribe("DNS", func() {
|
||||
CaptureStdout: true,
|
||||
CaptureStderr: true,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
|
||||
if !strings.Contains(stdout, "ndots:2") {
|
||||
framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
|
||||
}
|
||||
|
||||
By("Verifying customized name server and search path are working...")
|
||||
ginkgo.By("Verifying customized name server and search path are working...")
|
||||
// Do dig on not-exist-dns-name and see if the injected DNS record is returned.
|
||||
// This verifies both:
|
||||
// - Custom search path is appended.
|
||||
@@ -494,7 +497,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
CaptureStderr: true,
|
||||
})
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
|
||||
e2elog.Logf("ginkgo.Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
|
||||
return false, nil
|
||||
}
|
||||
res := strings.Split(stdout, "\n")
|
||||
@@ -505,7 +508,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
return true, nil
|
||||
}
|
||||
err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to verify customized name server and search path")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to verify customized name server and search path")
|
||||
|
||||
// TODO: Add more test cases for other DNSPolicies.
|
||||
})
|
||||
|
||||
@@ -35,8 +35,8 @@ import (
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
type dnsTestCommon struct {
|
||||
@@ -62,14 +62,14 @@ func newDNSTestCommon() dnsTestCommon {
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) init() {
|
||||
By("Finding a DNS pod")
|
||||
ginkgo.By("Finding a DNS pod")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
|
||||
namespace := "kube-system"
|
||||
pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", namespace)
|
||||
Expect(len(pods.Items)).Should(BeNumerically(">=", 1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", namespace)
|
||||
gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1))
|
||||
|
||||
t.dnsPod = &pods.Items[0]
|
||||
e2elog.Logf("Using DNS pod: %v", t.dnsPod.Name)
|
||||
@@ -157,23 +157,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) {
|
||||
}.AsSelector().String(),
|
||||
}
|
||||
cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns)
|
||||
|
||||
if len(cmList.Items) == 0 {
|
||||
By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
|
||||
ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
|
||||
_, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)
|
||||
} else {
|
||||
By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
|
||||
ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
|
||||
_, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string {
|
||||
if t.name == "coredns" {
|
||||
pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name)
|
||||
return pcm.Data
|
||||
}
|
||||
return nil
|
||||
@@ -189,10 +189,10 @@ func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) {
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) deleteConfigMap() {
|
||||
By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
|
||||
ginkgo.By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
|
||||
t.cm = nil
|
||||
err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete config map: %s", t.name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete config map: %s", t.name)
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
|
||||
@@ -224,9 +224,9 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
|
||||
|
||||
var err error
|
||||
t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.utilPod)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.utilPod)
|
||||
e2elog.Logf("Created pod %v", t.utilPod)
|
||||
Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.utilPod)
|
||||
gomega.Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.utilPod)
|
||||
|
||||
t.utilService = &v1.Service{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -249,7 +249,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
|
||||
}
|
||||
|
||||
t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
|
||||
e2elog.Logf("Created service %v", t.utilService)
|
||||
}
|
||||
|
||||
@@ -272,7 +272,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", pod.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", pod.Name)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -315,13 +315,13 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
|
||||
|
||||
var err error
|
||||
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.dnsServerPod)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.dnsServerPod)
|
||||
e2elog.Logf("Created pod %v", t.dnsServerPod)
|
||||
Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod)
|
||||
gomega.Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod)
|
||||
|
||||
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(
|
||||
t.dnsServerPod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name)
|
||||
}
|
||||
|
||||
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
|
||||
@@ -539,30 +539,30 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
|
||||
e2elog.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
|
||||
return false, nil
|
||||
}))
|
||||
Expect(len(failed)).To(Equal(0))
|
||||
gomega.Expect(len(failed)).To(gomega.Equal(0))
|
||||
}
|
||||
|
||||
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
|
||||
By("submitting the pod to kubernetes")
|
||||
ginkgo.By("submitting the pod to kubernetes")
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the pod")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("retrieving the pod")
|
||||
ginkgo.By("retrieving the pod")
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// Try to find results for each expected name.
|
||||
By("looking for the results for each expected name from probers")
|
||||
ginkgo.By("looking for the results for each expected name from probers")
|
||||
assertFilesExist(fileNames, "results", pod, f.ClientSet)
|
||||
|
||||
// TODO: probe from the host, too.
|
||||
@@ -571,26 +571,26 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
|
||||
}
|
||||
|
||||
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
|
||||
By("submitting the pod to kubernetes")
|
||||
ginkgo.By("submitting the pod to kubernetes")
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
defer func() {
|
||||
By("deleting the pod")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the pod")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
if _, err := podClient.Create(pod); err != nil {
|
||||
framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("retrieving the pod")
|
||||
ginkgo.By("retrieving the pod")
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
// Try to find the expected value for each expected name.
|
||||
By("looking for the results for each expected name from probers")
|
||||
ginkgo.By("looking for the results for each expected name from probers")
|
||||
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
|
||||
|
||||
e2elog.Logf("DNS probes using %s succeeded\n", pod.Name)
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
type dnsFederationsConfigMapTest struct {
|
||||
@@ -45,7 +45,7 @@ var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() {
|
||||
|
||||
t := &dnsFederationsConfigMapTest{dnsTestCommon: newDNSTestCommon()}
|
||||
|
||||
It("should be able to change federation configuration [Slow][Serial]", func() {
|
||||
ginkgo.It("should be able to change federation configuration [Slow][Serial]", func() {
|
||||
t.c = t.f.ClientSet
|
||||
t.run()
|
||||
})
|
||||
@@ -96,17 +96,17 @@ func (t *dnsFederationsConfigMapTest) run() {
|
||||
}`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)}
|
||||
valid2m := map[string]string{t.labels[1]: "xyz.com"}
|
||||
|
||||
By("default -> valid1")
|
||||
ginkgo.By("default -> valid1")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
|
||||
t.deleteCoreDNSPods()
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
By("valid1 -> valid2")
|
||||
ginkgo.By("valid1 -> valid2")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
|
||||
t.deleteCoreDNSPods()
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
By("valid2 -> default")
|
||||
ginkgo.By("valid2 -> default")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: originalConfigMapData}, nil, false)
|
||||
t.deleteCoreDNSPods()
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
@@ -121,27 +121,27 @@ func (t *dnsFederationsConfigMapTest) run() {
|
||||
valid2m := map[string]string{t.labels[1]: "xyz"}
|
||||
invalid := map[string]string{"federations": "invalid.map=xyz"}
|
||||
|
||||
By("empty -> valid1")
|
||||
ginkgo.By("empty -> valid1")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
By("valid1 -> valid2")
|
||||
ginkgo.By("valid1 -> valid2")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
By("valid2 -> invalid")
|
||||
ginkgo.By("valid2 -> invalid")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
By("invalid -> valid1")
|
||||
ginkgo.By("invalid -> valid1")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
By("valid1 -> deleted")
|
||||
ginkgo.By("valid1 -> deleted")
|
||||
t.deleteConfigMap()
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
By("deleted -> invalid")
|
||||
ginkgo.By("deleted -> invalid")
|
||||
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
|
||||
t.validate(framework.TestContext.ClusterDNSDomain)
|
||||
}
|
||||
@@ -151,7 +151,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) {
|
||||
federations := t.fedMap
|
||||
|
||||
if len(federations) == 0 {
|
||||
By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels))
|
||||
ginkgo.By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels))
|
||||
|
||||
for _, label := range t.labels {
|
||||
var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.%s.",
|
||||
@@ -173,7 +173,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) {
|
||||
// Check local mapping. Checking a remote mapping requires
|
||||
// creating an arbitrary DNS record which is not possible at the
|
||||
// moment.
|
||||
By(fmt.Sprintf("Validating federation record %v", label))
|
||||
ginkgo.By(fmt.Sprintf("Validating federation record %v", label))
|
||||
predicate := func(actual []string) bool {
|
||||
for _, v := range actual {
|
||||
if v == localDNS {
|
||||
@@ -407,16 +407,16 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
|
||||
serviceName := "dns-externalname-upstream-test"
|
||||
externalNameService := framework.CreateServiceSpec(serviceName, googleDNSHostname, false, nil)
|
||||
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil {
|
||||
Fail(fmt.Sprintf("Failed when creating service: %v", err))
|
||||
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
|
||||
}
|
||||
serviceNameLocal := "dns-externalname-upstream-local"
|
||||
externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
|
||||
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil {
|
||||
Fail(fmt.Sprintf("Failed when creating service: %v", err))
|
||||
ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
|
||||
}
|
||||
defer func() {
|
||||
By("deleting the test externalName service")
|
||||
defer GinkgoRecover()
|
||||
ginkgo.By("deleting the test externalName service")
|
||||
defer ginkgo.GinkgoRecover()
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
|
||||
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil)
|
||||
}()
|
||||
@@ -482,28 +482,28 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
|
||||
|
||||
var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() {
|
||||
|
||||
Context("Change stubDomain", func() {
|
||||
ginkgo.Context("Change stubDomain", func() {
|
||||
nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()}
|
||||
|
||||
It("should be able to change stubDomain configuration [Slow][Serial]", func() {
|
||||
ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() {
|
||||
nsTest.c = nsTest.f.ClientSet
|
||||
nsTest.run(false)
|
||||
})
|
||||
})
|
||||
|
||||
Context("Forward PTR lookup", func() {
|
||||
ginkgo.Context("Forward PTR lookup", func() {
|
||||
fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()}
|
||||
|
||||
It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
fwdTest.c = fwdTest.f.ClientSet
|
||||
fwdTest.run(false)
|
||||
})
|
||||
})
|
||||
|
||||
Context("Forward external name lookup", func() {
|
||||
ginkgo.Context("Forward external name lookup", func() {
|
||||
externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()}
|
||||
|
||||
It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
externalNameTest.c = externalNameTest.f.ClientSet
|
||||
externalNameTest.run(false)
|
||||
})
|
||||
@@ -512,28 +512,28 @@ var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() {
|
||||
|
||||
var _ = SIGDescribe("DNS configMap nameserver [Feature:Networking-IPv6]", func() {
|
||||
|
||||
Context("Change stubDomain", func() {
|
||||
ginkgo.Context("Change stubDomain", func() {
|
||||
nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()}
|
||||
|
||||
It("should be able to change stubDomain configuration [Slow][Serial]", func() {
|
||||
ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() {
|
||||
nsTest.c = nsTest.f.ClientSet
|
||||
nsTest.run(true)
|
||||
})
|
||||
})
|
||||
|
||||
Context("Forward PTR lookup", func() {
|
||||
ginkgo.Context("Forward PTR lookup", func() {
|
||||
fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()}
|
||||
|
||||
It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
fwdTest.c = fwdTest.f.ClientSet
|
||||
fwdTest.run(true)
|
||||
})
|
||||
})
|
||||
|
||||
Context("Forward external name lookup", func() {
|
||||
ginkgo.Context("Forward external name lookup", func() {
|
||||
externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()}
|
||||
|
||||
It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
|
||||
externalNameTest.c = externalNameTest.f.ClientSet
|
||||
externalNameTest.run(true)
|
||||
})
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -43,7 +43,7 @@ const (
|
||||
var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
|
||||
f := framework.NewDefaultFramework("performancedns")
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
|
||||
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
|
||||
|
||||
@@ -52,7 +52,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
|
||||
})
|
||||
|
||||
// answers dns for service - creates the maximum number of services, and then check dns record for one
|
||||
It("Should answer DNS query for maximum number of services per cluster", func() {
|
||||
ginkgo.It("Should answer DNS query for maximum number of services per cluster", func() {
|
||||
// get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace
|
||||
numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace
|
||||
|
||||
@@ -64,7 +64,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
|
||||
|
||||
services := generateServicesInNamespaces(namespaces, maxServicesPerCluster)
|
||||
createService := func(i int) {
|
||||
defer GinkgoRecover()
|
||||
defer ginkgo.GinkgoRecover()
|
||||
framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i]))
|
||||
}
|
||||
e2elog.Logf("Creating %v test services", maxServicesPerCluster)
|
||||
|
||||
@@ -32,8 +32,8 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -52,11 +52,11 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
|
||||
f := framework.NewDefaultFramework("cluster-dns")
|
||||
|
||||
var c clientset.Interface
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
})
|
||||
|
||||
It("should create pod that uses dns", func() {
|
||||
ginkgo.It("should create pod that uses dns", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file)
|
||||
}
|
||||
@@ -84,7 +84,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
|
||||
var err error
|
||||
namespaceName := fmt.Sprintf("dnsexample%d", i)
|
||||
namespaces[i], err = f.CreateNamespace(namespaceName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
|
||||
}
|
||||
|
||||
for _, ns := range namespaces {
|
||||
@@ -106,13 +106,13 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns.Name).List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns.Name)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", ns.Name)
|
||||
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for all pods to respond")
|
||||
e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
|
||||
|
||||
err = framework.ServiceResponding(c, ns.Name, backendSvcName)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for the service to respond")
|
||||
}
|
||||
|
||||
// Now another tricky part:
|
||||
@@ -134,7 +134,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
|
||||
|
||||
queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendSvcName+"."+namespaces[0].Name)
|
||||
_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for output from pod exec")
|
||||
|
||||
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain))
|
||||
|
||||
@@ -153,7 +153,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
|
||||
// wait for pods to print their result
|
||||
for _, ns := range namespaces {
|
||||
_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "pod %s failed to print result in logs", frontendPodName)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "pod %s failed to print result in logs", frontendPodName)
|
||||
}
|
||||
})
|
||||
})
|
||||
@@ -165,10 +165,10 @@ func getNsCmdFlag(ns *v1.Namespace) string {
|
||||
// pass enough context with the 'old' parameter so that it replaces what your really intended.
|
||||
func prepareResourceWithReplacedString(inputFile, old, new string) string {
|
||||
f, err := os.Open(inputFile)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to open file: %s", inputFile)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to open file: %s", inputFile)
|
||||
defer f.Close()
|
||||
data, err := ioutil.ReadAll(f)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to read from file: %s", inputFile)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from file: %s", inputFile)
|
||||
podYaml := strings.Replace(string(data), old, new, 1)
|
||||
return podYaml
|
||||
}
|
||||
|
||||
@@ -30,8 +30,8 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
|
||||
gcecloud "k8s.io/legacy-cloud-providers/gce"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -49,38 +49,38 @@ var _ = SIGDescribe("Firewall rule", func() {
|
||||
var cloudConfig framework.CloudConfig
|
||||
var gceCloud *gcecloud.Cloud
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
|
||||
var err error
|
||||
cs = f.ClientSet
|
||||
cloudConfig = framework.TestContext.CloudConfig
|
||||
gceCloud, err = gce.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
// This test takes around 6 minutes to run
|
||||
It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() {
|
||||
ginkgo.It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() {
|
||||
ns := f.Namespace.Name
|
||||
// This source ranges is just used to examine we have exact same things on LB firewall rules
|
||||
firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"}
|
||||
serviceName := "firewall-test-loadbalancer"
|
||||
|
||||
By("Getting cluster ID")
|
||||
ginkgo.By("Getting cluster ID")
|
||||
clusterID, err := gce.GetClusterID(cs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
e2elog.Logf("Got cluster ID: %v", clusterID)
|
||||
|
||||
jig := framework.NewServiceTestJig(cs, serviceName)
|
||||
nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests)
|
||||
Expect(nodeList).NotTo(BeNil())
|
||||
gomega.Expect(nodeList).NotTo(gomega.BeNil())
|
||||
nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
|
||||
if len(nodesNames) <= 0 {
|
||||
framework.Failf("Expect at least 1 node, got: %v", nodesNames)
|
||||
}
|
||||
nodesSet := sets.NewString(nodesNames...)
|
||||
|
||||
By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
|
||||
ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
|
||||
svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
|
||||
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}}
|
||||
svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
|
||||
@@ -90,61 +90,61 @@ var _ = SIGDescribe("Firewall rule", func() {
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
svc.Spec.LoadBalancerSourceRanges = nil
|
||||
})
|
||||
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
|
||||
By("Waiting for the local traffic health check firewall rule to be deleted")
|
||||
gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred())
|
||||
ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted")
|
||||
localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
|
||||
_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
}()
|
||||
svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
|
||||
|
||||
By("Checking if service's firewall rule is correct")
|
||||
ginkgo.By("Checking if service's firewall rule is correct")
|
||||
lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
|
||||
fw, err := gceCloud.GetFirewall(lbFw.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Checking if service's nodes health check firewall rule is correct")
|
||||
ginkgo.By("Checking if service's nodes health check firewall rule is correct")
|
||||
nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
|
||||
fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE
|
||||
By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")
|
||||
ginkgo.By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")
|
||||
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
|
||||
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
|
||||
})
|
||||
|
||||
By("Waiting for the nodes health check firewall rule to be deleted")
|
||||
ginkgo.By("Waiting for the nodes health check firewall rule to be deleted")
|
||||
_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By("Waiting for the correct local traffic health check firewall rule to be created")
|
||||
ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created")
|
||||
localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
|
||||
fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
|
||||
ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
|
||||
for i, nodeName := range nodesNames {
|
||||
podName := fmt.Sprintf("netexec%v", i)
|
||||
jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true)
|
||||
defer func() {
|
||||
e2elog.Logf("Cleaning up the netexec pod: %v", podName)
|
||||
Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())
|
||||
gomega.Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(gomega.HaveOccurred())
|
||||
}()
|
||||
}
|
||||
|
||||
// Send requests from outside of the cluster because internal traffic is whitelisted
|
||||
By("Accessing the external service ip from outside, all non-master nodes should be reached")
|
||||
Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
|
||||
ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached")
|
||||
gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
|
||||
// by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect
|
||||
// simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but
|
||||
// that's much harder to do in the current e2e framework.
|
||||
By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0]))
|
||||
ginkgo.By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0]))
|
||||
nodesSet.Delete(nodesNames[0])
|
||||
// Instance could run in a different zone in multi-zone test. Figure out which zone
|
||||
// it is in before proceeding.
|
||||
@@ -154,31 +154,31 @@ var _ = SIGDescribe("Firewall rule", func() {
|
||||
}
|
||||
removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
|
||||
defer func() {
|
||||
By("Adding tags back to the node and wait till the traffic is recovered")
|
||||
ginkgo.By("Adding tags back to the node and wait till the traffic is recovered")
|
||||
nodesSet.Insert(nodesNames[0])
|
||||
gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
|
||||
// Make sure traffic is recovered before exit
|
||||
Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
|
||||
gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred())
|
||||
}()
|
||||
|
||||
By("Accessing service through the external ip and examine got no response from the node without tags")
Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
ginkgo.By("Accessing service through the external ip and examine got no response from the node without tags")
gomega.Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(gomega.HaveOccurred())
})

It("should have correct firewall rules for e2e cluster", func() {
|
||||
ginkgo.It("should have correct firewall rules for e2e cluster", func() {
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(cs)
|
||||
if len(nodes.Items) <= 0 {
|
||||
framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
|
||||
}
|
||||
|
||||
By("Checking if e2e firewall rules are correct")
|
||||
ginkgo.By("Checking if e2e firewall rules are correct")
|
||||
for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
|
||||
fw, err := gceCloud.GetFirewall(expFw.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
By("Checking well known ports on master and nodes are not exposed externally")
|
||||
ginkgo.By("Checking well known ports on master and nodes are not exposed externally")
|
||||
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
|
||||
if len(nodeAddrs) == 0 {
|
||||
framework.Failf("did not find any node addresses")
|
||||
|
||||
@@ -18,6 +18,7 @@ package network

import "github.com/onsi/ginkgo"

// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-network] "+text, body)
}

@@ -40,8 +40,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
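For context on the import hunk just above: the whole change is a move from dot imports to package-qualified ginkgo and gomega. A minimal, self-contained sketch of a suite written in the qualified style (the package name, suite name, and spec below are illustrative, not taken from this commit):

    package example_test

    import (
        "testing"

        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    // Hook ginkgo/gomega into go test; the suite name is illustrative.
    func TestExample(t *testing.T) {
        gomega.RegisterFailHandler(ginkgo.Fail)
        ginkgo.RunSpecs(t, "Example Suite")
    }

    // Every helper is referenced through its package, mirroring the hunks in
    // this diff: By -> ginkgo.By, Expect -> gomega.Expect, and so on.
    var _ = ginkgo.Describe("qualified import style", func() {
        ginkgo.It("uses package-qualified helpers", func() {
            ginkgo.By("asserting through gomega")
            gomega.Expect(1 + 1).To(gomega.Equal(2))
        })
    })

With the dot imports gone, every helper call names its package, which is exactly the By to ginkgo.By and Expect to gomega.Expect rewrite repeated throughout the rest of the diff.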
|
||||
const (
|
||||
@@ -50,7 +50,7 @@ const (
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
defer GinkgoRecover()
|
||||
defer ginkgo.GinkgoRecover()
|
||||
var (
|
||||
ns string
|
||||
jig *ingress.TestJig
|
||||
@@ -58,7 +58,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
)
|
||||
f := framework.NewDefaultFramework("ingress")
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
jig = ingress.NewIngressTestJig(f.ClientSet)
|
||||
ns = f.Namespace.Name
|
||||
|
||||
@@ -81,59 +81,60 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
//
|
||||
// Slow by design ~10m for each "It" block dominated by loadbalancer setup time
|
||||
// TODO: write similar tests for nginx, haproxy and AWS Ingress.
|
||||
Describe("GCE [Slow] [Feature:Ingress]", func() {
|
||||
ginkgo.Describe("GCE [Slow] [Feature:Ingress]", func() {
|
||||
var gceController *gce.IngressController
|
||||
|
||||
// Platform specific setup
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
By("Initializing gce controller")
|
||||
ginkgo.By("Initializing gce controller")
|
||||
gceController = &gce.IngressController{
Ns: ns,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
}
err := gceController.Init()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
})
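The other recurring rewrite, visible right above, replaces the two-step gomega assertion on err with framework.ExpectNoError(err). As a rough sketch of what such a helper amounts to (the body below is an assumption for illustration, not the framework's actual source):

    package example

    import "github.com/onsi/gomega"

    // expectNoError mimics what framework.ExpectNoError does at the call
    // sites in this diff: fail the current spec when err is non-nil,
    // optionally with extra context.
    func expectNoError(err error, explain ...interface{}) {
        gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
    }

Using an offset of 1 keeps the reported failure location at the caller rather than inside the helper, which is the usual reason for wrapping the assertion.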
|
||||
// Platform specific cleanup
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
ginkgo.AfterEach(func() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DescribeIng(ns)
|
||||
}
|
||||
if jig.Ingress == nil {
|
||||
By("No ingress created, no cleanup necessary")
|
||||
ginkgo.By("No ingress created, no cleanup necessary")
|
||||
return
|
||||
}
|
||||
By("Deleting ingress")
|
||||
ginkgo.By("Deleting ingress")
|
||||
jig.TryDeleteIngress()
|
||||
|
||||
By("Cleaning up cloud resources")
|
||||
Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred())
|
||||
ginkgo.By("Cleaning up cloud resources")
|
||||
err := gceController.CleanupIngressController()
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
ginkgo.It("should conform to Ingress spec", func() {
|
||||
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
|
||||
for _, t := range conformanceTests {
|
||||
By(t.EntryLog)
|
||||
ginkgo.By(t.EntryLog)
|
||||
t.Execute()
|
||||
By(t.ExitLog)
|
||||
ginkgo.By(t.ExitLog)
|
||||
jig.WaitForIngress(true)
|
||||
}
|
||||
})
|
||||
|
||||
It("should create ingress with pre-shared certificate", func() {
|
||||
ginkgo.It("should create ingress with pre-shared certificate", func() {
|
||||
executePresharedCertTest(f, jig, "")
|
||||
})
|
||||
|
||||
It("should support multiple TLS certs", func() {
|
||||
By("Creating an ingress with no certs.")
|
||||
ginkgo.It("should support multiple TLS certs", func() {
|
||||
ginkgo.By("Creating an ingress with no certs.")
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{
|
||||
ingress.IngressStaticIPKey: ns,
|
||||
}, map[string]string{})
|
||||
|
||||
By("Adding multiple certs to the ingress.")
|
||||
ginkgo.By("Adding multiple certs to the ingress.")
|
||||
hosts := []string{"test1.ingress.com", "test2.ingress.com", "test3.ingress.com", "test4.ingress.com"}
|
||||
secrets := []string{"tls-secret-1", "tls-secret-2", "tls-secret-3", "tls-secret-4"}
|
||||
certs := [][]byte{}
|
||||
@@ -143,35 +144,35 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
}
|
||||
for i, host := range hosts {
|
||||
err := jig.WaitForIngressWithCert(true, []string{host}, certs[i])
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
}
|
||||
|
||||
By("Remove all but one of the certs on the ingress.")
|
||||
ginkgo.By("Remove all but one of the certs on the ingress.")
|
||||
jig.RemoveHTTPS(secrets[1])
|
||||
jig.RemoveHTTPS(secrets[2])
|
||||
jig.RemoveHTTPS(secrets[3])
|
||||
|
||||
By("Test that the remaining cert is properly served.")
|
||||
ginkgo.By("Test that the remaining cert is properly served.")
|
||||
err := jig.WaitForIngressWithCert(true, []string{hosts[0]}, certs[0])
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
|
||||
By("Add back one of the certs that was removed and check that all certs are served.")
|
||||
ginkgo.By("Add back one of the certs that was removed and check that all certs are served.")
|
||||
jig.AddHTTPS(secrets[1], hosts[1])
|
||||
for i, host := range hosts[:2] {
|
||||
err := jig.WaitForIngressWithCert(true, []string{host}, certs[i])
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
}
|
||||
})
|
||||
|
||||
It("multicluster ingress should get instance group annotation", func() {
|
||||
ginkgo.It("multicluster ingress should get instance group annotation", func() {
|
||||
name := "echomap"
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{
|
||||
ingress.IngressClassKey: ingress.MulticlusterIngressClassValue,
|
||||
}, map[string]string{})
|
||||
|
||||
By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
framework.ExpectNoError(err)
annotations := ing.Annotations
if annotations == nil || annotations[instanceGroupAnnotation] == "" {
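Only the client call changes in this hunk (ExtensionsV1beta1 to NetworkingV1beta1); the surrounding wait.Poll loop keeps its shape: the condition function returns (false, nil) to keep polling, (true, nil) to succeed, or a non-nil error to abort immediately. A small stand-alone sketch of that shape, with made-up interval and timeout values:

    package example

    import (
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    // pollUntilReady demonstrates the wait.Poll pattern used above: keep
    // calling check every two seconds until it reports ready or five
    // minutes have elapsed.
    func pollUntilReady(check func() bool) error {
        return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
            if !check() {
                return false, nil // not ready yet, poll again
            }
            return true, nil
        })
    }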
@@ -194,7 +195,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
scKey := ingress.StatusPrefix + "/ssl-cert"
|
||||
beKey := ingress.StatusPrefix + "/backends"
|
||||
wait.Poll(2*time.Second, time.Minute, func() (bool, error) {
|
||||
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
|
||||
ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
annotations := ing.Annotations
|
||||
if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" ||
|
||||
@@ -237,118 +238,122 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
// zone based on pod labels.
|
||||
})
|
||||
|
||||
Describe("GCE [Slow] [Feature:NEG]", func() {
|
||||
ginkgo.Describe("GCE [Slow] [Feature:NEG]", func() {
|
||||
var gceController *gce.IngressController
|
||||
|
||||
// Platform specific setup
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
By("Initializing gce controller")
|
||||
ginkgo.By("Initializing gce controller")
|
||||
gceController = &gce.IngressController{
|
||||
Ns: ns,
|
||||
Client: jig.Client,
|
||||
Cloud: framework.TestContext.CloudConfig,
|
||||
}
|
||||
err := gceController.Init()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// Platform specific cleanup
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
ginkgo.AfterEach(func() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DescribeIng(ns)
|
||||
}
|
||||
if jig.Ingress == nil {
|
||||
By("No ingress created, no cleanup necessary")
|
||||
ginkgo.By("No ingress created, no cleanup necessary")
|
||||
return
|
||||
}
|
||||
By("Deleting ingress")
|
||||
ginkgo.By("Deleting ingress")
|
||||
jig.TryDeleteIngress()
|
||||
|
||||
By("Cleaning up cloud resources")
|
||||
Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred())
|
||||
ginkgo.By("Cleaning up cloud resources")
|
||||
err := gceController.CleanupIngressController()
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
ginkgo.It("should conform to Ingress spec", func() {
|
||||
jig.PollInterval = 5 * time.Second
|
||||
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
|
||||
ingress.NEGAnnotation: `{"ingress": true}`,
|
||||
})
|
||||
for _, t := range conformanceTests {
|
||||
By(t.EntryLog)
|
||||
ginkgo.By(t.EntryLog)
|
||||
t.Execute()
|
||||
By(t.ExitLog)
|
||||
ginkgo.By(t.ExitLog)
|
||||
jig.WaitForIngress(true)
|
||||
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred())
|
||||
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
})
|
||||
|
||||
It("should be able to switch between IG and NEG modes", func() {
|
||||
ginkgo.It("should be able to switch between IG and NEG modes", func() {
|
||||
var err error
|
||||
By("Create a basic HTTP ingress using NEG")
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(true)
|
||||
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred())
|
||||
err = gceController.WaitForNegBackendService(jig.GetServicePorts(false))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Switch backend service to use IG")
|
||||
ginkgo.By("Switch backend service to use IG")
|
||||
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil {
|
||||
e2elog.Logf("Failed to verify IG backend service: %v", err)
|
||||
e2elog.Logf("ginkgo.Failed to verify IG backend service: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "Expect backend service to target IG, but failed to observe")
|
||||
framework.ExpectNoError(err, "Expect backend service to target IG, but failed to observe")
|
||||
jig.WaitForIngress(true)
|
||||
|
||||
By("Switch backend service to use NEG")
|
||||
ginkgo.By("Switch backend service to use NEG")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil {
|
||||
e2elog.Logf("Failed to verify NEG backend service: %v", err)
|
||||
e2elog.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "Expect backend service to target NEG, but failed to observe")
|
||||
framework.ExpectNoError(err, "Expect backend service to target NEG, but failed to observe")
|
||||
jig.WaitForIngress(true)
|
||||
})
|
||||
|
||||
It("should be able to create a ClusterIP service", func() {
|
||||
By("Create a basic HTTP ingress using NEG")
|
||||
ginkgo.It("should be able to create a ClusterIP service", func() {
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(true)
|
||||
svcPorts := jig.GetServicePorts(false)
|
||||
Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(HaveOccurred())
|
||||
err := gceController.WaitForNegBackendService(svcPorts)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// ClusterIP ServicePorts have no NodePort
|
||||
for _, sp := range svcPorts {
|
||||
Expect(sp.NodePort).To(Equal(int32(0)))
|
||||
gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0)))
|
||||
}
|
||||
})
|
||||
|
||||
It("should sync endpoints to NEG", func() {
|
||||
ginkgo.It("should sync endpoints to NEG", func() {
|
||||
name := "hostname"
|
||||
scaleAndValidateNEG := func(num int) {
|
||||
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if scale.Spec.Replicas != int32(num) {
|
||||
scale.Spec.Replicas = int32(num)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress()
|
||||
@@ -358,45 +363,47 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
e2elog.Logf("Expecting %d backends, got %d", num, res.Len())
|
||||
return res.Len() == num, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
By("Create a basic HTTP ingress using NEG")
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(true)
|
||||
jig.WaitForIngressToStable()
|
||||
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred())
|
||||
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
|
||||
framework.ExpectNoError(err)
|
||||
// initial replicas number is 1
|
||||
scaleAndValidateNEG(1)
|
||||
|
||||
By("Scale up number of backends to 5")
|
||||
ginkgo.By("Scale up number of backends to 5")
|
||||
scaleAndValidateNEG(5)
|
||||
|
||||
By("Scale down number of backends to 3")
|
||||
ginkgo.By("Scale down number of backends to 3")
|
||||
scaleAndValidateNEG(3)
|
||||
|
||||
By("Scale up number of backends to 6")
|
||||
ginkgo.By("Scale up number of backends to 6")
|
||||
scaleAndValidateNEG(6)
|
||||
|
||||
By("Scale down number of backends to 2")
|
||||
ginkgo.By("Scale down number of backends to 2")
|
||||
scaleAndValidateNEG(3)
|
||||
})
|
||||
|
||||
It("rolling update backend pods should not cause service disruption", func() {
|
||||
ginkgo.It("rolling update backend pods should not cause service disruption", func() {
|
||||
name := "hostname"
|
||||
replicas := 8
|
||||
By("Create a basic HTTP ingress using NEG")
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(true)
|
||||
jig.WaitForIngressToStable()
|
||||
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred())
|
||||
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By(fmt.Sprintf("Scale backend replicas to %d", replicas))
|
||||
ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas))
|
||||
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
scale.Spec.Replicas = int32(replicas)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress()
|
||||
@@ -405,21 +412,21 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
}
|
||||
return res.Len() == replicas, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Trigger rolling update and observe service disruption")
|
||||
ginkgo.By("Trigger rolling update and observe service disruption")
|
||||
deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
// trigger by changing graceful termination period to 60 seconds
|
||||
gracePeriod := int64(60)
|
||||
deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if int(deploy.Status.UpdatedReplicas) == replicas {
|
||||
if res.Len() == replicas {
|
||||
return true, nil
|
||||
@@ -427,29 +434,28 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
e2elog.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
|
||||
return false, nil
|
||||
|
||||
} else {
|
||||
e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.")
|
||||
return false, nil
|
||||
}
|
||||
e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.")
|
||||
return false, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() {
|
||||
ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() {
|
||||
name := "hostname"
|
||||
expectedKeys := []int32{80, 443}
|
||||
|
||||
scaleAndValidateExposedNEG := func(num int) {
|
||||
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if scale.Spec.Replicas != int32(num) {
|
||||
scale.Spec.Replicas = int32(num)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
|
||||
svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
var status ingress.NegStatus
|
||||
v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
|
||||
@@ -482,10 +488,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
}
|
||||
|
||||
gceCloud, err := gce.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
for _, neg := range status.NetworkEndpointGroups {
|
||||
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if len(networkEndpoints) != num {
|
||||
e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
|
||||
return false, nil
|
||||
@@ -494,31 +500,32 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
By("Create a basic HTTP ingress using NEG")
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(true)
|
||||
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred())
|
||||
err := gceController.WaitForNegBackendService(jig.GetServicePorts(false))
|
||||
framework.ExpectNoError(err)
|
||||
// initial replicas number is 1
|
||||
scaleAndValidateExposedNEG(1)
|
||||
|
||||
By("Scale up number of backends to 5")
|
||||
ginkgo.By("Scale up number of backends to 5")
|
||||
scaleAndValidateExposedNEG(5)
|
||||
|
||||
By("Scale down number of backends to 3")
|
||||
ginkgo.By("Scale down number of backends to 3")
|
||||
scaleAndValidateExposedNEG(3)
|
||||
|
||||
By("Scale up number of backends to 6")
|
||||
ginkgo.By("Scale up number of backends to 6")
|
||||
scaleAndValidateExposedNEG(6)
|
||||
|
||||
By("Scale down number of backends to 2")
|
||||
ginkgo.By("Scale down number of backends to 2")
|
||||
scaleAndValidateExposedNEG(3)
|
||||
})
|
||||
|
||||
It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() {
|
||||
By("Create a basic HTTP ingress using standalone NEG")
|
||||
ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() {
|
||||
ginkgo.By("Create a basic HTTP ingress using standalone NEG")
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(true)
|
||||
|
||||
@@ -526,120 +533,121 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
detectNegAnnotation(f, jig, gceController, ns, name, 2)
|
||||
|
||||
// Add Ingress annotation - NEGs should stay the same.
|
||||
By("Adding NEG Ingress annotation")
ginkgo.By("Adding NEG Ingress annotation")
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
}
detectNegAnnotation(f, jig, gceController, ns, name, 2)
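The loop above drives NEG creation purely through the annotation named by ingress.NEGAnnotation and a plain Service update. A compressed sketch of the same flow outside the e2e framework; the annotation key constant and the error handling are assumptions here (the literal key is a stand-in for whatever ingress.NEGAnnotation resolves to), while the JSON value and the context-free client-go signatures mirror the hunk:

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // negAnnotation stands in for ingress.NEGAnnotation in the diff.
    const negAnnotation = "cloud.google.com/neg"

    // exposeNEGPorts annotates every Service in ns so that NEGs are created
    // for the Ingress and for ports 80/443, then pushes each Service back.
    func exposeNEGPorts(cs kubernetes.Interface, ns string) error {
        svcList, err := cs.CoreV1().Services(ns).List(metav1.ListOptions{})
        if err != nil {
            return err
        }
        for i := range svcList.Items {
            svc := svcList.Items[i]
            if svc.Annotations == nil {
                svc.Annotations = map[string]string{}
            }
            svc.Annotations[negAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
            if _, err := cs.CoreV1().Services(ns).Update(&svc); err != nil {
                return err
            }
        }
        return nil
    }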
|
||||
// Modify exposed NEG annotation, but keep ingress annotation
|
||||
By("Modifying exposed NEG annotation, but keep Ingress annotation")
|
||||
ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
detectNegAnnotation(f, jig, gceController, ns, name, 2)
|
||||
|
||||
// Remove Ingress annotation. Expect 1 NEG
|
||||
By("Disabling Ingress annotation, but keeping one standalone NEG")
|
||||
ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
detectNegAnnotation(f, jig, gceController, ns, name, 1)
|
||||
|
||||
// Remove NEG annotation entirely. Expect 0 NEGs.
|
||||
By("Removing NEG annotation")
|
||||
ginkgo.By("Removing NEG annotation")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
delete(svc.Annotations, ingress.NEGAnnotation)
|
||||
// Service cannot be ClusterIP if it's using Instance Groups.
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
detectNegAnnotation(f, jig, gceController, ns, name, 0)
|
||||
})
|
||||
})
|
||||
|
||||
Describe("GCE [Slow] [Feature:kubemci]", func() {
|
||||
ginkgo.Describe("GCE [Slow] [Feature:kubemci]", func() {
|
||||
var gceController *gce.IngressController
|
||||
var ipName, ipAddress string
|
||||
|
||||
// Platform specific setup
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
jig.Class = ingress.MulticlusterIngressClassValue
|
||||
jig.PollInterval = 5 * time.Second
|
||||
By("Initializing gce controller")
|
||||
ginkgo.By("Initializing gce controller")
|
||||
gceController = &gce.IngressController{
|
||||
Ns: ns,
|
||||
Client: jig.Client,
|
||||
Cloud: framework.TestContext.CloudConfig,
|
||||
}
|
||||
err := gceController.Init()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// TODO(https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/19):
|
||||
// Kubemci should reserve a static ip if user has not specified one.
|
||||
ipName = "kubemci-" + string(uuid.NewUUID())
|
||||
// ip released when the rest of lb resources are deleted in CleanupIngressController
|
||||
ipAddress = gceController.CreateStaticIP(ipName)
|
||||
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress))
|
||||
ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress))
|
||||
})
|
||||
|
||||
// Platform specific cleanup
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
ginkgo.AfterEach(func() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DescribeIng(ns)
|
||||
}
|
||||
if jig.Ingress == nil {
|
||||
By("No ingress created, no cleanup necessary")
|
||||
ginkgo.By("No ingress created, no cleanup necessary")
|
||||
} else {
|
||||
By("Deleting ingress")
|
||||
ginkgo.By("Deleting ingress")
|
||||
jig.TryDeleteIngress()
|
||||
}
|
||||
|
||||
By("Cleaning up cloud resources")
|
||||
Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred())
|
||||
ginkgo.By("Cleaning up cloud resources")
|
||||
err := gceController.CleanupIngressController()
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
ginkgo.It("should conform to Ingress spec", func() {
|
||||
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
|
||||
ingress.IngressStaticIPKey: ipName,
|
||||
})
|
||||
for _, t := range conformanceTests {
|
||||
By(t.EntryLog)
|
||||
ginkgo.By(t.EntryLog)
|
||||
t.Execute()
|
||||
By(t.ExitLog)
|
||||
ginkgo.By(t.ExitLog)
|
||||
jig.WaitForIngress(false /*waitForNodePort*/)
|
||||
}
|
||||
})
|
||||
|
||||
It("should create ingress with pre-shared certificate", func() {
|
||||
ginkgo.It("should create ingress with pre-shared certificate", func() {
|
||||
executePresharedCertTest(f, jig, ipName)
|
||||
})
|
||||
|
||||
It("should create ingress with backend HTTPS", func() {
|
||||
ginkgo.It("should create ingress with backend HTTPS", func() {
|
||||
executeBacksideBacksideHTTPSTest(f, jig, ipName)
|
||||
})
|
||||
|
||||
It("should support https-only annotation", func() {
|
||||
ginkgo.It("should support https-only annotation", func() {
|
||||
executeStaticIPHttpsOnlyTest(f, jig, ipName, ipAddress)
|
||||
})
|
||||
|
||||
It("should remove clusters as expected", func() {
|
||||
ginkgo.It("should remove clusters as expected", func() {
|
||||
ingAnnotations := map[string]string{
|
||||
ingress.IngressStaticIPKey: ipName,
|
||||
}
|
||||
@@ -668,8 +676,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
verifyKubemciStatusHas(name, "is spread across 0 cluster")
|
||||
})
|
||||
|
||||
It("single and multi-cluster ingresses should be able to exist together", func() {
|
||||
By("Creating a single cluster ingress first")
|
||||
ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() {
|
||||
ginkgo.By("Creating a single cluster ingress first")
|
||||
jig.Class = ""
|
||||
singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2")
|
||||
jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{})
|
||||
@@ -678,7 +686,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
singleIng := jig.Ingress
|
||||
|
||||
// Create the multi-cluster ingress next.
|
||||
By("Creating a multi-cluster ingress next")
|
||||
ginkgo.By("Creating a multi-cluster ingress next")
|
||||
jig.Class = ingress.MulticlusterIngressClassValue
|
||||
ingAnnotations := map[string]string{
|
||||
ingress.IngressStaticIPKey: ipName,
|
||||
@@ -688,7 +696,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
jig.WaitForIngress(false /*waitForNodePort*/)
|
||||
mciIngress := jig.Ingress
|
||||
|
||||
By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work")
|
||||
ginkgo.By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work")
|
||||
jig.Ingress = singleIng
|
||||
jig.Class = ""
|
||||
jig.TryDeleteIngress()
|
||||
@@ -696,18 +704,18 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
jig.Class = ingress.MulticlusterIngressClassValue
|
||||
jig.WaitForIngress(false /*waitForNodePort*/)
|
||||
|
||||
By("Cleanup: Deleting the multi-cluster ingress")
|
||||
ginkgo.By("Cleanup: Deleting the multi-cluster ingress")
|
||||
jig.TryDeleteIngress()
|
||||
})
|
||||
})
|
||||
|
||||
// Time: borderline 5m, slow by design
|
||||
Describe("[Slow] Nginx", func() {
|
||||
ginkgo.Describe("[Slow] Nginx", func() {
|
||||
var nginxController *ingress.NginxIngressController
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
By("Initializing nginx controller")
|
||||
ginkgo.By("Initializing nginx controller")
|
||||
jig.Class = "nginx"
|
||||
nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client}
|
||||
|
||||
@@ -723,30 +731,30 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
nginxController.Init()
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ginkgo.AfterEach(func() {
|
||||
if framework.ProviderIs("gce", "gke") {
|
||||
framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
|
||||
}
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DescribeIng(ns)
|
||||
}
|
||||
if jig.Ingress == nil {
|
||||
By("No ingress created, no cleanup necessary")
|
||||
ginkgo.By("No ingress created, no cleanup necessary")
|
||||
return
|
||||
}
|
||||
By("Deleting ingress")
|
||||
ginkgo.By("Deleting ingress")
|
||||
jig.TryDeleteIngress()
|
||||
})
|
||||
|
||||
It("should conform to Ingress spec", func() {
|
||||
ginkgo.It("should conform to Ingress spec", func() {
|
||||
// Poll more frequently to reduce e2e completion time.
|
||||
// This test runs in presubmit.
|
||||
jig.PollInterval = 5 * time.Second
|
||||
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
|
||||
for _, t := range conformanceTests {
|
||||
By(t.EntryLog)
|
||||
ginkgo.By(t.EntryLog)
|
||||
t.Execute()
|
||||
By(t.ExitLog)
|
||||
ginkgo.By(t.ExitLog)
|
||||
jig.WaitForIngress(false)
|
||||
}
|
||||
})
|
||||
@@ -766,28 +774,28 @@ func verifyKubemciStatusHas(name, expectedSubStr string) {
|
||||
|
||||
func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) {
|
||||
preSharedCertName := "test-pre-shared-cert"
|
||||
By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
|
||||
ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
|
||||
testHostname := "test.ingress.com"
|
||||
cert, key, err := ingress.GenerateRSACerts(testHostname, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
gceCloud, err := gce.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
// We would not be able to delete the cert until ingress controller
|
||||
// cleans up the target proxy that references it.
|
||||
By("Deleting ingress before deleting ssl certificate")
|
||||
ginkgo.By("Deleting ingress before deleting ssl certificate")
|
||||
if jig.Ingress != nil {
|
||||
jig.TryDeleteIngress()
|
||||
}
|
||||
By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
|
||||
ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
|
||||
err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
|
||||
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
|
||||
e2elog.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
|
||||
e2elog.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err))
|
||||
framework.ExpectNoError(err, fmt.Sprintf("ginkgo.Failed to delete ssl certificate %q: %v", preSharedCertName, err))
|
||||
}()
|
||||
_, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{
|
||||
Name: preSharedCertName,
|
||||
@@ -795,9 +803,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
|
||||
PrivateKey: string(key),
|
||||
Description: "pre-shared cert for ingress testing",
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err))
|
||||
framework.ExpectNoError(err, fmt.Sprintf("ginkgo.Failed to create ssl certificate %q: %v", preSharedCertName, err))
|
||||
|
||||
By("Creating an ingress referencing the pre-shared certificate")
|
||||
ginkgo.By("Creating an ingress referencing the pre-shared certificate")
|
||||
// Create an ingress referencing this cert using pre-shared-cert annotation.
|
||||
ingAnnotations := map[string]string{
|
||||
ingress.IngressPreSharedCertKey: preSharedCertName,
|
||||
@@ -810,9 +818,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
|
||||
}
|
||||
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
|
||||
|
||||
By("Test that ingress works with the pre-shared certificate")
|
||||
ginkgo.By("Test that ingress works with the pre-shared certificate")
|
||||
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
|
||||
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
framework.ExpectNoError(err, fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
|
||||
}
|
||||
|
||||
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) {
|
||||
@@ -821,30 +829,30 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig,
|
||||
ingress.IngressAllowHTTPKey: "false",
|
||||
}, map[string]string{})
|
||||
|
||||
By("waiting for Ingress to come up with ip: " + ip)
|
||||
ginkgo.By("waiting for Ingress to come up with ip: " + ip)
|
||||
httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout)
|
||||
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
|
||||
|
||||
By("should reject HTTP traffic")
|
||||
ginkgo.By("should reject HTTP traffic")
|
||||
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
|
||||
}
|
||||
|
||||
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) {
|
||||
By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
|
||||
ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
|
||||
deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName)
|
||||
defer func() {
|
||||
By("Cleaning up re-encryption ingress, service and deployment")
|
||||
ginkgo.By("Cleaning up re-encryption ingress, service and deployment")
|
||||
if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
|
||||
framework.Failf("Failed to cleanup re-encryption ingress: %v", errs)
|
||||
framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs)
|
||||
}
|
||||
}()
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress")
|
||||
framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress")
|
||||
|
||||
By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
|
||||
ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
|
||||
ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP")
|
||||
framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP")
|
||||
|
||||
By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
|
||||
ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
|
||||
timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
|
||||
err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
|
||||
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
|
||||
@@ -858,7 +866,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
|
||||
e2elog.Logf("Poll succeeded, request was served by HTTPS")
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
|
||||
framework.ExpectNoError(err, "ginkgo.Failed to verify backside re-encryption ingress")
|
||||
}
|
||||
|
||||
func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
|
||||
@@ -872,7 +880,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
|
||||
if negs == 0 {
|
||||
err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false))
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to validate IG backend service: %v", err)
|
||||
e2elog.Logf("ginkgo.Failed to validate IG backend service: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@@ -898,10 +906,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
|
||||
}
|
||||
|
||||
gceCloud, err := gce.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
for _, neg := range status.NetworkEndpointGroups {
|
||||
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
if len(networkEndpoints) != 1 {
|
||||
e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
|
||||
return false, nil
|
||||
@@ -910,11 +918,11 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
|
||||
|
||||
err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to validate NEG backend service: %v", err)
|
||||
e2elog.Logf("ginkgo.Failed to validate NEG backend service: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,26 +20,26 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/network/scale"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
defer GinkgoRecover()
defer ginkgo.GinkgoRecover()
var (
ns string
)
f := framework.NewDefaultFramework("ingress-scale")
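The defer GinkgoRecover(), kept here with package qualification, is ginkgo's way of converting an assertion panic into a reported spec failure when code runs off the main spec goroutine. A minimal sketch of the documented pattern (the function and channel names are illustrative):

    package example

    import (
        "github.com/onsi/ginkgo"
        "github.com/onsi/gomega"
    )

    // inGoroutine shows the GinkgoRecover pattern: any assertion made off
    // the spec goroutine must be guarded so its panic is reported as a
    // failure instead of crashing the test process.
    func inGoroutine(done chan struct{}) {
        go func() {
            defer ginkgo.GinkgoRecover()
            defer close(done)
            gomega.Expect(true).To(gomega.BeTrue())
        }()
    }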
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() {
|
||||
ginkgo.Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() {
|
||||
var (
|
||||
scaleFramework *scale.IngressScaleFramework
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
|
||||
scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
|
||||
@@ -48,13 +48,13 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ginkgo.AfterEach(func() {
|
||||
if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 {
|
||||
framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
|
||||
}
|
||||
})
|
||||
|
||||
It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
|
||||
ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
|
||||
if errs := scaleFramework.RunScaleTest(); len(errs) != 0 {
|
||||
framework.Failf("Unexpected error while running ingress scale test: %v", errs)
|
||||
}
|
||||
|
||||
@@ -33,8 +33,8 @@ import (
"k8s.io/kubernetes/test/images/net/nat"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Net)
@@ -49,7 +49,7 @@ var _ = SIGDescribe("Network", func() {
|
||||
|
||||
fr := framework.NewDefaultFramework("network")
|
||||
|
||||
It("should set TCP CLOSE_WAIT timeout", func() {
|
||||
ginkgo.It("should set TCP CLOSE_WAIT timeout", func() {
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
|
||||
ips := framework.CollectAddresses(nodes, v1.NodeInternalIP)
|
||||
|
||||
@@ -145,21 +145,21 @@ var _ = SIGDescribe("Network", func() {
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf(
|
||||
ginkgo.By(fmt.Sprintf(
|
||||
"Launching a server daemon on node %v (node ip: %v, image: %v)",
|
||||
serverNodeInfo.name,
|
||||
serverNodeInfo.nodeIP,
|
||||
kubeProxyE2eImage))
|
||||
fr.PodClient().CreateSync(serverPodSpec)
|
||||
|
||||
By(fmt.Sprintf(
|
||||
ginkgo.By(fmt.Sprintf(
|
||||
"Launching a client daemon on node %v (node ip: %v, image: %v)",
|
||||
clientNodeInfo.name,
|
||||
clientNodeInfo.nodeIP,
|
||||
kubeProxyE2eImage))
|
||||
fr.PodClient().CreateSync(clientPodSpec)
|
||||
|
||||
By("Make client connect")
|
||||
ginkgo.By("Make client connect")
|
||||
|
||||
options := nat.CloseWaitClientOptions{
|
||||
RemoteAddr: fmt.Sprintf("%v:%v",
|
||||
@@ -179,7 +179,7 @@ var _ = SIGDescribe("Network", func() {
|
||||
|
||||
<-time.After(time.Duration(1) * time.Second)
|
||||
|
||||
By("Checking /proc/net/nf_conntrack for the timeout")
|
||||
ginkgo.By("Checking /proc/net/nf_conntrack for the timeout")
|
||||
// If test flakes occur here, then this check should be performed
|
||||
// in a loop as there may be a race with the client connecting.
|
||||
e2essh.IssueSSHCommandWithResult(
|
||||
@@ -214,8 +214,8 @@ var _ = SIGDescribe("Network", func() {
|
||||
e2elog.Logf("conntrack entry timeout was: %v, expected: %v",
timeoutSeconds, expectedTimeoutSeconds)

Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
BeNumerically("<", (epsilonSeconds)))
gomega.Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
gomega.BeNumerically("<", (epsilonSeconds)))
})
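The conntrack assertion above is simply a tolerance check: the observed timeout must sit within epsilonSeconds of the expected value. Pulled out on its own, the gomega form looks like this (the function and parameter names are made up for illustration):

    package example

    import (
        "math"

        "github.com/onsi/gomega"
    )

    // withinTolerance asserts, via gomega, that got is within eps of want.
    // Callers are expected to have registered a gomega fail handler already.
    func withinTolerance(got, want, eps float64) {
        gomega.Expect(math.Abs(got - want)).Should(gomega.BeNumerically("<", eps))
    }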
|
||||
// Regression test for #74839, where:
|
||||
@@ -223,7 +223,7 @@ var _ = SIGDescribe("Network", func() {
|
||||
// a problem where spurious retransmits in a long-running TCP connection to a service
|
||||
// IP could result in the connection being closed with the error "Connection reset by
|
||||
// peer"
|
||||
It("should resolve connection reset issue #74839 [Slow]", func() {
ginkgo.It("should resolve connection reset issue #74839 [Slow]", func() {
serverLabel := map[string]string{
"app": "boom-server",
}
@@ -265,7 +265,7 @@ var _ = SIGDescribe("Network", func() {
|
||||
_, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Server pod created")
|
||||
ginkgo.By("Server pod created")
|
||||
|
||||
svc := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -284,7 +284,7 @@ var _ = SIGDescribe("Network", func() {
|
||||
_, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Server service created")
|
||||
ginkgo.By("Server service created")
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -319,13 +319,13 @@ var _ = SIGDescribe("Network", func() {
|
||||
_, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Client pod created")
|
||||
ginkgo.By("Client pod created")
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
time.Sleep(3 * time.Second)
|
||||
resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(BeNil())
|
||||
gomega.Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(gomega.BeNil())
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@@ -27,8 +27,8 @@ import (
|
||||
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
/*
|
||||
@@ -45,27 +45,27 @@ var _ = SIGDescribe("NetworkPolicy", func() {
|
||||
var podServer *v1.Pod
|
||||
f := framework.NewDefaultFramework("network-policy")
|
||||
|
||||
Context("NetworkPolicy between server and client", func() {
|
||||
BeforeEach(func() {
|
||||
By("Creating a simple server that serves on port 80 and 81.")
|
||||
ginkgo.Context("NetworkPolicy between server and client", func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
ginkgo.By("Creating a simple server that serves on port 80 and 81.")
|
||||
podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81})
|
||||
|
||||
By("Waiting for pod ready", func() {
|
||||
ginkgo.By("Waiting for pod ready", func() {
|
||||
err := f.WaitForPodReady(podServer.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
})
|
||||
|
||||
// Create pods, which should be able to communicate with the server on port 80 and 81.
|
||||
By("Testing pods can connect to both ports when no policy is present.")
|
||||
ginkgo.By("Testing pods can connect to both ports when no policy is present.")
|
||||
testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80)
|
||||
testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
ginkgo.AfterEach(func() {
|
||||
cleanupServerPodAndService(f, podServer, service)
|
||||
})
|
||||
|
||||
It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() {
ginkgo.It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() {
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "deny-all",
@@ -77,7 +77,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}

policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy)

// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
@@ -85,8 +85,8 @@ var _ = SIGDescribe("NetworkPolicy", func() {
testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80)
})

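The deny-all policy created in that spec works by selecting every pod in the namespace while granting no ingress rules. A minimal sketch of such an object; only the name appears in the visible diff lines, so the empty pod selector and empty ingress list are assumptions based on the behaviour the spec tests:

    package example

    import (
        networkingv1 "k8s.io/api/networking/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // denyAllPolicy matches all pods (empty PodSelector) and allows no
    // ingress, so every inbound connection to pods in the namespace is
    // denied once the policy is applied.
    func denyAllPolicy() *networkingv1.NetworkPolicy {
        return &networkingv1.NetworkPolicy{
            ObjectMeta: metav1.ObjectMeta{
                Name: "deny-all",
            },
            Spec: networkingv1.NetworkPolicySpec{
                PodSelector: metav1.LabelSelector{}, // all pods in the namespace
                Ingress:     []networkingv1.NetworkPolicyIngressRule{},
            },
        }
    }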
It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() {
|
||||
By("Creating a network policy for the server which allows traffic from the pod 'client-a'.")
|
||||
ginkgo.It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Creating a network policy for the server which allows traffic from the pod 'client-a'.")
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-client-a-via-pod-selector",
|
||||
@@ -112,18 +112,18 @@ var _ = SIGDescribe("NetworkPolicy", func() {
|
||||
}
|
||||
|
||||
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Creating client-a which should be able to contact the server.", func() {
|
||||
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
|
||||
testCanConnect(f, f.Namespace, "client-a", service, 80)
|
||||
})
|
||||
By("Creating client-b which should not be able to contact the server.", func() {
|
||||
ginkgo.By("Creating client-b which should not be able to contact the server.", func() {
|
||||
testCannotConnect(f, f.Namespace, "client-b", service, 80)
|
||||
})
|
||||
})
|
||||
|
||||
It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsA := f.Namespace
|
||||
nsBName := f.BaseName + "-b"
|
||||
// The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself
|
||||
@@ -132,15 +132,15 @@ var _ = SIGDescribe("NetworkPolicy", func() {
|
||||
nsB, err := f.CreateNamespace(nsBName, map[string]string{
|
||||
"ns-name": nsBName,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Create Server with Service in NS-B
|
||||
e2elog.Logf("Waiting for server to come up.")
|
||||
err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Create Policy for that service that allows traffic only via namespace B
|
||||
By("Creating a network policy for the server which allows traffic from namespace-b.")
|
||||
ginkgo.By("Creating a network policy for the server which allows traffic from namespace-b.")
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ns-b-via-namespace-selector",
|
||||
@@ -165,15 +165,15 @@ var _ = SIGDescribe("NetworkPolicy", func() {
|
||||
},
|
||||
}
|
||||
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
testCannotConnect(f, nsA, "client-a", service, 80)
|
||||
testCanConnect(f, nsB, "client-b", service, 80)
|
||||
})
|
||||
|
||||
It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
|
||||
By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-81",
|
||||
@@ -194,16 +194,16 @@ var _ = SIGDescribe("NetworkPolicy", func() {
|
||||
},
|
||||
}
|
||||
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Testing pods can connect only to the port allowed by the policy.")
|
||||
ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
|
||||
testCannotConnect(f, f.Namespace, "client-a", service, 80)
|
||||
testCanConnect(f, f.Namespace, "client-b", service, 81)
|
||||
})
|
||||
|
||||
It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
|
||||
By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-80",
|
||||
@@ -224,10 +224,10 @@ var _ = SIGDescribe("NetworkPolicy", func() {
|
||||
},
|
||||
}
|
||||
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Creating a network policy for the Service which allows traffic only to another port.")
|
||||
ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.")
|
||||
policy2 := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-81",
|
||||
@@ -248,16 +248,16 @@ var _ = SIGDescribe("NetworkPolicy", func() {
|
||||
},
|
||||
}
|
||||
policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy2)
|
||||
|
||||
By("Testing pods can connect to both ports when both policies are present.")
|
||||
ginkgo.By("Testing pods can connect to both ports when both policies are present.")
|
||||
testCanConnect(f, f.Namespace, "client-a", service, 80)
|
||||
testCanConnect(f, f.Namespace, "client-b", service, 81)
|
||||
})

It("should support allow-all policy [Feature:NetworkPolicy]", func() {
By("Creating a network policy which allows all traffic.")
ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network policy which allows all traffic.")
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-all",
@@ -271,15 +271,15 @@ var _ = SIGDescribe("NetworkPolicy", func() {
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy)

By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
testCanConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81)
})

It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() {
ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() {
policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-named-port-ingress-rule",
@@ -301,18 +301,18 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}

policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy)

By("Creating client-a which should be able to contact the server.", func() {
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80)
})
By("Creating client-b which should not be able to contact the server on port 81.", func() {
ginkgo.By("Creating client-b which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, "client-b", service, 81)
})
})

It("should allow egress access on one named port [Feature:NetworkPolicy]", func() {
ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() {
clientPodName := "client-a"
protocolUDP := v1.ProtocolUDP
policy := &networkingv1.NetworkPolicy{
@@ -343,13 +343,13 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}

policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy)

By("Creating client-a which should be able to contact the server.", func() {
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, clientPodName, service, 80)
})
By("Creating client-a which should not be able to contact the server on port 81.", func() {
ginkgo.By("Creating client-a which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, clientPodName, service, 81)
})
})
@@ -357,10 +357,10 @@ var _ = SIGDescribe("NetworkPolicy", func() {
})

func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name))
ginkgo.By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort)
defer func() {
By(fmt.Sprintf("Cleaning up the pod %s", podName))
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
@@ -368,7 +368,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se

e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
Expect(err).NotTo(HaveOccurred(), "Pod did not finish as expected.")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Pod did not finish as expected.")

e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
@@ -404,10 +404,10 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
}

func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name))
ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort)
defer func() {
By(fmt.Sprintf("Cleaning up the pod %s", podName))
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
@@ -495,7 +495,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
})
}

By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name))
ginkgo.By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name))
pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@@ -508,11 +508,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
RestartPolicy: v1.RestartPolicyNever,
},
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Created pod %v", pod.ObjectMeta.Name)

svcName := fmt.Sprintf("svc-%s", podName)
By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name))
ginkgo.By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name))
svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
@@ -524,18 +524,18 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
},
},
})
Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Created service %s", svc.Name)

return pod, svc
}

func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) {
By("Cleaning up the server.")
ginkgo.By("Cleaning up the server.")
if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", pod.Name, err)
}
By("Cleaning up the server's service.")
ginkgo.By("Cleaning up the server's service.")
if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil {
framework.Failf("unable to cleanup svc %v: %v", service.Name, err)
}
@@ -569,13 +569,13 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
},
})

Expect(err).NotTo(HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred())

return pod
}

func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) {
By("Cleaning up the policy.")
ginkgo.By("Cleaning up the policy.")
if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil {
framework.Failf("unable to cleanup policy %v: %v", policy.Name, err)
}
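Taken together, the helper hunks above encode connectivity as pod exit status: testCanConnect launches a short-lived client pod against the service, waits for it to stop running (WaitForPodNoLongerRunningInNamespace), and then requires WaitForPodSuccessInNamespace to report a clean exit, while testCannotConnect presumably asserts the opposite outcome for the same kind of pod. A self-contained sketch of a client pod in that spirit; the image, command, and retry budget are illustrative assumptions, not the values used by createNetworkClientPod:

// Hypothetical probe pod: exits 0 on the first successful fetch, 1 if every attempt fails.
// Assumed imports: "fmt", v1 "k8s.io/api/core/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
func newProbePod(name, host string, port int) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "client",
				Image:   "busybox",
				Command: []string{"/bin/sh", "-c"},
				Args: []string{
					fmt.Sprintf("for i in 1 2 3 4 5; do wget -T 5 %s:%d -O - && exit 0; sleep 1; done; exit 1", host, port),
				},
			}},
		},
	}
}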
Some files were not shown because too many files have changed in this diff