Merge pull request #90187 from julianvmodesto/last-applied-updater

Implement server-side apply upgrade and downgrade
This commit is contained in:
Kubernetes Prow Robot
2020-07-13 16:45:20 -07:00
committed by GitHub
14 changed files with 1462 additions and 65 deletions

View File

@@ -393,6 +393,34 @@ run_kubectl_server_side_apply_tests() {
# clean-up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
## kubectl apply upgrade
# Verifies round-tripping between client-side apply and server-side apply:
# the last-applied-configuration annotation must be kept up to date in both
# directions so either mode can take over management of the object.
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
kube::log::status "Testing upgrade kubectl client-side apply to server-side apply"
# run client-side apply
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# test upgrade does not work with non-standard server-side apply field manager
# (the leading `!` inverts the exit status; the explicit `|| exit 1` is needed
# because errexit does not fire on negated commands)
! kubectl apply --server-side --field-manager="not-kubectl" -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" || exit 1
# test upgrade from client-side apply to server-side apply
kubectl apply --server-side -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
# Post-Condition: pod "test-pod" has configuration annotation
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
# the annotation should now reflect the server-side-applied manifest
output_message=$(kubectl apply view-last-applied pod/test-pod -o json 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" '"name": "test-pod-applied"'
kube::log::status "Testing downgrade kubectl server-side apply to client-side apply"
# test downgrade from server-side apply to client-side apply
kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
# Post-Condition: pod "test-pod" has configuration annotation
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
# the annotation should track the most recent server-side apply (pod.yaml)
output_message=$(kubectl apply view-last-applied pod/test-pod -o json 2>&1 "${kube_flags[@]:?}")
kube::test::if_has_string "${output_message}" '"name": "test-pod-label"'
# a follow-up client-side apply must succeed against the downgraded object
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
# clean-up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
## kubectl apply dry-run on CR
# Create CRD
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__

View File

@@ -28,6 +28,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",

View File

@@ -38,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
yamlutil "k8s.io/apimachinery/pkg/util/yaml"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes"
@@ -2464,3 +2465,133 @@ func benchRepeatedUpdate(client kubernetes.Interface, podName string) func(*test
}
}
}
func TestUpgradeClientSideToServerSideApply(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
_, client, closeFn := setup(t)
defer closeFn()
obj := []byte(`
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-deployment
annotations:
"kubectl.kubernetes.io/last-applied-configuration": |
{"kind":"Deployment","apiVersion":"apps/v1","metadata":{"name":"my-deployment","labels":{"app":"my-app"}},"spec":{"replicas": 3,"template":{"metadata":{"labels":{"app":"my-app"}},"spec":{"containers":[{"name":"my-c","image":"my-image"}]}}}}
labels:
app: my-app
spec:
replicas: 100000
selector:
matchLabels:
app: my-app
template:
metadata:
labels:
app: my-app
spec:
containers:
- name: my-c
image: my-image
`)
deployment, err := yamlutil.ToJSON(obj)
if err != nil {
t.Fatalf("Failed marshal yaml: %v", err)
}
_, err = client.CoreV1().RESTClient().Post().
AbsPath("/apis/apps/v1").
Namespace("default").
Resource("deployments").
Body(deployment).Do(context.TODO()).Get()
if err != nil {
t.Fatalf("Failed to create object: %v", err)
}
obj = []byte(`
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-deployment
labels:
app: my-new-label
spec:
replicas: 3 # expect conflict
template:
metadata:
labels:
app: my-app
spec:
containers:
- name: my-c
image: my-image
`)
deployment, err = yamlutil.ToJSON(obj)
if err != nil {
t.Fatalf("Failed marshal yaml: %v", err)
}
_, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
AbsPath("/apis/apps/v1").
Namespace("default").
Resource("deployments").
Name("my-deployment").
Param("fieldManager", "kubectl").
Body(deployment).
Do(context.TODO()).
Get()
if !apierrors.IsConflict(err) {
t.Fatalf("Expected conflict error but got: %v", err)
}
obj = []byte(`
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-deployment
labels:
app: my-new-label
spec:
template:
metadata:
labels:
app: my-app
spec:
containers:
- name: my-c
image: my-image-new
`)
deployment, err = yamlutil.ToJSON(obj)
if err != nil {
t.Fatalf("Failed marshal yaml: %v", err)
}
_, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
AbsPath("/apis/apps/v1").
Namespace("default").
Resource("deployments").
Name("my-deployment").
Param("fieldManager", "kubectl").
Body(deployment).
Do(context.TODO()).
Get()
if err != nil {
t.Fatalf("Failed to apply object: %v", err)
}
deploymentObj, err := client.AppsV1().Deployments("default").Get(context.TODO(), "my-deployment", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get object: %v", err)
}
if *deploymentObj.Spec.Replicas != 100000 {
t.Fatalf("expected to get obj with replicas %d, but got %d", 100000, *deploymentObj.Spec.Replicas)
}
if deploymentObj.Spec.Template.Spec.Containers[0].Image != "my-image-new" {
t.Fatalf("expected to get obj with image %s, but got %s", "my-image-new", deploymentObj.Spec.Template.Spec.Containers[0].Image)
}
}