Merge pull request #122595 from dims/support-building-with-and-without-cloud-providers
KUBE_PROVIDERLESS - Support building with and without cloud providers
@@ -31,6 +31,13 @@ unset CDPATH
# they can explicitly set GO111MODULE=on
export GO111MODULE=off

# FIXME(dims): Note that here we assume that if GOFLAGS are already set we
# leave them as-is and not try to add providerless to it. So if you
# really need to set your own GOFLAGS, ensure you add "providerless" explicitly
if [[ "${KUBE_PROVIDERLESS:-"n"}" == "y" ]]; then
  export GOFLAGS=${GOFLAGS:-"-tags=providerless"}
fi

# The root of the build/dist directory
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
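Side note: a minimal sketch of the build-tag pairing that -tags=providerless selects between (file and package names here are illustrative, not from this commit). The Go toolchain compiles exactly one of the two files, which is how entire cloud-provider implementations drop out of a providerless build:

    // file: cloudselect/enabled.go — compiled only in the default build
    //go:build !providerless

    package cloudselect

    // Variant reports which build was linked in.
    func Variant() string { return "with in-tree cloud providers" }

    // file: cloudselect/disabled.go — compiled only with -tags=providerless
    //go:build providerless

    package cloudselect

    // Variant reports which build was linked in.
    func Variant() string { return "providerless" }

With the hunk above, setting KUBE_PROVIDERLESS=y exports GOFLAGS=-tags=providerless (unless GOFLAGS was already set by the caller), so every go invocation under the build scripts picks the second file.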
@@ -26,7 +26,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
cd "${KUBE_ROOT}"
# verify the providerless build
# https://github.com/kubernetes/enhancements/blob/master/keps/sig-cloud-provider/1179-building-without-in-tree-providers/README.md
hack/verify-typecheck.sh --skip-test --tags=providerless --ignore-dirs=test/e2e
hack/verify-typecheck.sh --skip-test --tags=providerless

# verify using go list
if _out="$(go list -mod=readonly -tags "providerless" -e -json k8s.io/kubernetes/cmd/kubelet/... \
@@ -22,6 +22,8 @@ import (
    "net"
    "time"

    "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
@@ -160,3 +162,14 @@ func ipnetToStringList(inCIDRs []*net.IPNet) []string {
    }
    return outCIDRs
}

// occupyServiceCIDR removes the service CIDR range from the cluster CIDR if it
// intersects.
func occupyServiceCIDR(set *cidrset.CidrSet, clusterCIDR, serviceCIDR *net.IPNet) error {
    if clusterCIDR.Contains(serviceCIDR.IP) || serviceCIDR.Contains(clusterCIDR.IP) {
        if err := set.Occupy(serviceCIDR); err != nil {
            return err
        }
    }
    return nil
}
@@ -165,17 +165,6 @@ func (c *Controller) Run(ctx context.Context) {
    <-ctx.Done()
}

// occupyServiceCIDR removes the service CIDR range from the cluster CIDR if it
// intersects.
func occupyServiceCIDR(set *cidrset.CidrSet, clusterCIDR, serviceCIDR *net.IPNet) error {
    if clusterCIDR.Contains(serviceCIDR.IP) || serviceCIDR.Contains(clusterCIDR.IP) {
        if err := set.Occupy(serviceCIDR); err != nil {
            return err
        }
    }
    return nil
}

type nodeState struct {
    t Timeout
}
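Side note: the overlap guard in occupyServiceCIDR is easy to misread, so here is a standalone sketch (names assumed, not from this commit) of why the two-way Contains test suffices: CIDR prefixes can only overlap by containment, so one network must contain the other's base address.

    package main

    import (
        "fmt"
        "net"
    )

    // overlaps mirrors occupyServiceCIDR's guard: two CIDR prefixes intersect
    // only if one of them contains the other's base address.
    func overlaps(a, b *net.IPNet) bool {
        return a.Contains(b.IP) || b.Contains(a.IP)
    }

    func main() {
        _, cluster, _ := net.ParseCIDR("10.0.0.0/16")
        _, service, _ := net.ParseCIDR("10.0.1.0/24")
        _, other, _ := net.ParseCIDR("192.168.0.0/24")
        fmt.Println(overlaps(cluster, service)) // true: mark the service range occupied
        fmt.Println(overlaps(cluster, other))   // false: nothing to occupy
    }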
@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

@@ -1,6 +1,3 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.
@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2021 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.
@@ -52,14 +52,6 @@ import (
    // ensure auth plugins are loaded
    _ "k8s.io/client-go/plugin/pkg/client/auth"

    // ensure that cloud providers are loaded
    _ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
    _ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
    _ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
    _ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
    _ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
    _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"

    // Ensure that logging flags are part of the command line.
    _ "k8s.io/component-base/logs/testinit"
)
test/e2e/framework/provider_less.go (new file, 27 lines)
@@ -0,0 +1,27 @@
//go:build providerless
// +build providerless

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

func init() {
    // fake "gce"
    RegisterProvider("gce", func() (ProviderInterface, error) {
        return NullProvider{}, nil
    })
}
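Side note: provider_less.go works because the framework keeps an init-time registry of named provider factories. A self-contained sketch of that pattern (shapes simplified and assumed here, not the framework's actual definitions):

    package main

    import "fmt"

    // ProviderInterface stands in for the framework's provider abstraction.
    type ProviderInterface interface{ Name() string }

    // nullProvider is a no-op stand-in, like the framework's NullProvider.
    type nullProvider struct{}

    func (nullProvider) Name() string { return "null" }

    var providers = map[string]func() (ProviderInterface, error){}

    // RegisterProvider is called from package init() functions, which is why a
    // blank import of a provider package (or the init above) is sufficient.
    func RegisterProvider(name string, factory func() (ProviderInterface, error)) {
        providers[name] = factory
    }

    // SetupProvider resolves the --provider flag at suite startup.
    func SetupProvider(name string) (ProviderInterface, error) {
        factory, ok := providers[name]
        if !ok {
            return nil, fmt.Errorf("provider %q is not registered", name)
        }
        return factory()
    }

    func main() {
        // In a providerless build, "gce" maps to the no-op provider, so suites
        // invoked with --provider=gce still start instead of failing lookup.
        RegisterProvider("gce", func() (ProviderInterface, error) { return nullProvider{}, nil })
        p, err := SetupProvider("gce")
        fmt.Println(p.Name(), err)
    }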
@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.
test/e2e/framework/providers/azure/doc.go (new file, 17 lines)
@@ -0,0 +1,17 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure
test/e2e/framework/providers/gce/doc.go (new file, 17 lines)
@@ -0,0 +1,17 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce
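Side note: these otherwise-empty doc.go files are presumably there for build-tag hygiene: if every other file in the package carries a !providerless constraint, a providerless build of any importer fails with "build constraints exclude all Go files". One untagged file keeps the package resolvable in both configurations. Sketch of the idea (illustrative, not from the commit):

    // doc.go — deliberately left untagged so the package contains at least one
    // file under every build configuration, keeping imports of this package
    // resolvable even when -tags=providerless excludes all its other files.
    package gce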
@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2015 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2019 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2017 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.
@@ -19,13 +19,9 @@ package network
import (
    "context"
    "encoding/json"
    "fmt"
    "path/filepath"
    "time"

    v1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -33,15 +29,8 @@ import (
    types "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/apiserver/pkg/authentication/serviceaccount"
    "k8s.io/client-go/util/retry"
    "k8s.io/kubernetes/test/e2e/feature"
    "k8s.io/kubernetes/test/e2e/framework"
    e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
    e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
    "k8s.io/kubernetes/test/e2e/framework/providers/gce"
    e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    "k8s.io/kubernetes/test/e2e/network/common"
    admissionapi "k8s.io/pod-security-admission/api"

@@ -49,498 +38,6 @@ import (
    "github.com/onsi/gomega"
)

const (
    negUpdateTimeout = 2 * time.Minute
)

var _ = common.SIGDescribe("Loadbalancing: L7", func() {
    defer ginkgo.GinkgoRecover()
    var (
        ns               string
        jig              *e2eingress.TestJig
        conformanceTests []e2eingress.ConformanceTests
    )
    f := framework.NewDefaultFramework("ingress")
    f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

    ginkgo.BeforeEach(func(ctx context.Context) {
        jig = e2eingress.NewIngressTestJig(f.ClientSet)
        ns = f.Namespace.Name

        // this test wants powerful permissions. Since the namespace names are unique, we can leave this
        // lying around so we don't have to race any caches
        err := e2eauth.BindClusterRole(ctx, jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name,
            rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
        framework.ExpectNoError(err)

        err = e2eauth.WaitForAuthorizationUpdate(ctx, jig.Client.AuthorizationV1(),
            serviceaccount.MakeUsername(f.Namespace.Name, "default"),
            "", "create", schema.GroupResource{Resource: "pods"}, true)
        framework.ExpectNoError(err)
    })

    // Before enabling this loadbalancer test in any other test list you must
    // make sure the associated project has enough quota. At the time of this
    // writing a GCE project is allowed 3 backend services by default. This
    // test requires at least 5.
    //
    // Slow by design ~10m for each "It" block dominated by loadbalancer setup time
    // TODO: write similar tests for nginx, haproxy and AWS Ingress.
    f.Describe("GCE", framework.WithSlow(), feature.Ingress, func() {
        var gceController *gce.IngressController

        // Platform specific setup
        ginkgo.BeforeEach(func(ctx context.Context) {
            e2eskipper.SkipUnlessProviderIs("gce", "gke")
            ginkgo.By("Initializing gce controller")
            gceController = &gce.IngressController{
                Ns:     ns,
                Client: jig.Client,
                Cloud:  framework.TestContext.CloudConfig,
            }
            err := gceController.Init(ctx)
            framework.ExpectNoError(err)
        })

        // Platform specific cleanup
        ginkgo.AfterEach(func(ctx context.Context) {
            if ginkgo.CurrentSpecReport().Failed() {
                e2eingress.DescribeIng(ns)
            }
            if jig.Ingress == nil {
                ginkgo.By("No ingress created, no cleanup necessary")
                return
            }
            ginkgo.By("Deleting ingress")
            jig.TryDeleteIngress(ctx)

            ginkgo.By("Cleaning up cloud resources")
            err := gceController.CleanupIngressController(ctx)
            framework.ExpectNoError(err)
        })

        ginkgo.It("should conform to Ingress spec", func(ctx context.Context) {
            conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{})
            for _, t := range conformanceTests {
                ginkgo.By(t.EntryLog)
                t.Execute()
                ginkgo.By(t.ExitLog)
                jig.WaitForIngress(ctx, true)
            }
        })

    })

    f.Describe("GCE", framework.WithSlow(), feature.NEG, func() {
        var gceController *gce.IngressController

        // Platform specific setup
        ginkgo.BeforeEach(func(ctx context.Context) {
            e2eskipper.SkipUnlessProviderIs("gce", "gke")
            ginkgo.By("Initializing gce controller")
            gceController = &gce.IngressController{
                Ns:     ns,
                Client: jig.Client,
                Cloud:  framework.TestContext.CloudConfig,
            }
            err := gceController.Init(ctx)
            framework.ExpectNoError(err)
        })

        // Platform specific cleanup
        ginkgo.AfterEach(func(ctx context.Context) {
            if ginkgo.CurrentSpecReport().Failed() {
                e2eingress.DescribeIng(ns)
            }
            if jig.Ingress == nil {
                ginkgo.By("No ingress created, no cleanup necessary")
                return
            }
            ginkgo.By("Deleting ingress")
            jig.TryDeleteIngress(ctx)

            ginkgo.By("Cleaning up cloud resources")
            err := gceController.CleanupIngressController(ctx)
            framework.ExpectNoError(err)
        })

        ginkgo.It("should conform to Ingress spec", func(ctx context.Context) {
            jig.PollInterval = 5 * time.Second
            conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{
                e2eingress.NEGAnnotation: `{"ingress": true}`,
            })
            for _, t := range conformanceTests {
                ginkgo.By(t.EntryLog)
                t.Execute()
                ginkgo.By(t.ExitLog)
                jig.WaitForIngress(ctx, true)
                err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
                framework.ExpectNoError(err)
            }
        })

        ginkgo.It("should be able to switch between IG and NEG modes", func(ctx context.Context) {
            var err error
            propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet)
            ginkgo.By("Create a basic HTTP ingress using NEG")
            jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
            jig.WaitForIngress(ctx, true)
            err = gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
            framework.ExpectNoError(err)

            ginkgo.By("Switch backend service to use IG")
            svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
            framework.ExpectNoError(err)
            for _, svc := range svcList.Items {
                svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": false}`
                _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
                framework.ExpectNoError(err)
            }
            err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) {
                if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false)); err != nil {
                    framework.Logf("ginkgo.Failed to verify IG backend service: %v", err)
                    return false, nil
                }
                return true, nil
            })
            framework.ExpectNoError(err, "Expect backend service to target IG, but failed to observe")
            jig.WaitForIngress(ctx, true)

            ginkgo.By("Switch backend service to use NEG")
            svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
            framework.ExpectNoError(err)
            for _, svc := range svcList.Items {
                svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": true}`
                _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
                framework.ExpectNoError(err)
            }
            err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) {
                if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false)); err != nil {
                    framework.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
                    return false, nil
                }
                return true, nil
            })
            framework.ExpectNoError(err, "Expect backend service to target NEG, but failed to observe")
            jig.WaitForIngress(ctx, true)
        })

        ginkgo.It("should be able to create a ClusterIP service", func(ctx context.Context) {
            ginkgo.By("Create a basic HTTP ingress using NEG")
            jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
            jig.WaitForIngress(ctx, true)
            svcPorts := jig.GetServicePorts(ctx, false)
            err := gceController.WaitForNegBackendService(ctx, svcPorts)
            framework.ExpectNoError(err)

            // ClusterIP ServicePorts have no NodePort
            for _, sp := range svcPorts {
                gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0)))
            }
        })

        ginkgo.It("should sync endpoints to NEG", func(ctx context.Context) {
            name := "hostname"
            scaleAndValidateNEG := func(num int) {
                scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
                framework.ExpectNoError(err)
                if scale.Spec.Replicas != int32(num) {
                    scale.ResourceVersion = "" // indicate the scale update should be unconditional
                    scale.Spec.Replicas = int32(num)
                    _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
                    framework.ExpectNoError(err)
                }
                err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
                    res, err := jig.GetDistinctResponseFromIngress(ctx)
                    if err != nil {
                        return false, nil
                    }
                    framework.Logf("Expecting %d backends, got %d", num, res.Len())
                    return res.Len() == num, nil
                })
                framework.ExpectNoError(err)
            }

            ginkgo.By("Create a basic HTTP ingress using NEG")
            jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
            jig.WaitForIngress(ctx, true)
            jig.WaitForIngressToStable(ctx)
            err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
            framework.ExpectNoError(err)
            // initial replicas number is 1
            scaleAndValidateNEG(1)

            ginkgo.By("Scale up number of backends to 5")
            scaleAndValidateNEG(5)

            ginkgo.By("Scale down number of backends to 3")
            scaleAndValidateNEG(3)

            ginkgo.By("Scale up number of backends to 6")
            scaleAndValidateNEG(6)

            ginkgo.By("Scale down number of backends to 2")
            scaleAndValidateNEG(3)
        })

        ginkgo.It("rolling update backend pods should not cause service disruption", func(ctx context.Context) {
            name := "hostname"
            replicas := 8
            ginkgo.By("Create a basic HTTP ingress using NEG")
            jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
            jig.WaitForIngress(ctx, true)
            jig.WaitForIngressToStable(ctx)
            err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
            framework.ExpectNoError(err)

            ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas))
            scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
            framework.ExpectNoError(err)
            scale.ResourceVersion = "" // indicate the scale update should be unconditional
            scale.Spec.Replicas = int32(replicas)
            _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
            framework.ExpectNoError(err)

            propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet)
            err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
                res, err := jig.GetDistinctResponseFromIngress(ctx)
                if err != nil {
                    return false, nil
                }
                return res.Len() == replicas, nil
            })
            framework.ExpectNoError(err)

            ginkgo.By("Trigger rolling update and observe service disruption")
            deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
            framework.ExpectNoError(err)
            // trigger by changing graceful termination period to 60 seconds
            gracePeriod := int64(60)
            deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
            _, err = f.ClientSet.AppsV1().Deployments(ns).Update(ctx, deploy, metav1.UpdateOptions{})
            framework.ExpectNoError(err)
            err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
                res, err := jig.GetDistinctResponseFromIngress(ctx)
                if err != nil {
                    return false, err
                }
                deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
                if err != nil {
                    return false, err
                }
                if int(deploy.Status.UpdatedReplicas) == replicas {
                    if res.Len() == replicas {
                        return true, nil
                    }
                    framework.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
                    return false, nil

                }
                framework.Logf("Waiting for rolling update to finished. Keep sending traffic.")
                return false, nil
            })
            framework.ExpectNoError(err)
        })

        ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func(ctx context.Context) {
            name := "hostname"
            expectedKeys := []int32{80, 443}

            scaleAndValidateExposedNEG := func(num int) {
                scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
                framework.ExpectNoError(err)
                if scale.Spec.Replicas != int32(num) {
                    scale.ResourceVersion = "" // indicate the scale update should be unconditional
                    scale.Spec.Replicas = int32(num)
                    _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
                    framework.ExpectNoError(err)
                }
                err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
                    svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
                    framework.ExpectNoError(err)

                    var status e2eingress.NegStatus
                    v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
                    if !ok {
                        // Wait for NEG sync loop to find NEGs
                        framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
                        return false, nil
                    }
                    err = json.Unmarshal([]byte(v), &status)
                    if err != nil {
                        framework.Logf("Error in parsing Expose NEG annotation: %v", err)
                        return false, nil
                    }
                    framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)

                    // Expect 2 NEGs to be created based on the test setup (neg-exposed)
                    if len(status.NetworkEndpointGroups) != 2 {
                        framework.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
                        return false, nil
                    }

                    for _, port := range expectedKeys {
                        if _, ok := status.NetworkEndpointGroups[port]; !ok {
                            framework.Logf("Expected ServicePort key %v, but does not exist", port)
                        }
                    }

                    if len(status.NetworkEndpointGroups) != len(expectedKeys) {
                        framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
                    }

                    gceCloud, err := gce.GetGCECloud()
                    framework.ExpectNoError(err)
                    for _, neg := range status.NetworkEndpointGroups {
                        networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
                        framework.ExpectNoError(err)
                        if len(networkEndpoints) != num {
                            framework.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
                            return false, nil
                        }
                    }

                    return true, nil
                })
                framework.ExpectNoError(err)
            }

            ginkgo.By("Create a basic HTTP ingress using NEG")
            jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
            jig.WaitForIngress(ctx, true)
            err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
            framework.ExpectNoError(err)
            // initial replicas number is 1
            scaleAndValidateExposedNEG(1)

            ginkgo.By("Scale up number of backends to 5")
            scaleAndValidateExposedNEG(5)

            ginkgo.By("Scale down number of backends to 3")
            scaleAndValidateExposedNEG(3)

            ginkgo.By("Scale up number of backends to 6")
            scaleAndValidateExposedNEG(6)

            ginkgo.By("Scale down number of backends to 2")
            scaleAndValidateExposedNEG(3)
        })

        ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func(ctx context.Context) {
            ginkgo.By("Create a basic HTTP ingress using standalone NEG")
            jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
            jig.WaitForIngress(ctx, true)

            name := "hostname"
            detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)

            // Add Ingress annotation - NEGs should stay the same.
            ginkgo.By("Adding NEG Ingress annotation")
            svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
            framework.ExpectNoError(err)
            for _, svc := range svcList.Items {
                svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
                _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
                framework.ExpectNoError(err)
            }
            detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)

            // Modify exposed NEG annotation, but keep ingress annotation
            ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation")
            svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
            framework.ExpectNoError(err)
            for _, svc := range svcList.Items {
                svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
                _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
                framework.ExpectNoError(err)
            }
            detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)

            // Remove Ingress annotation. Expect 1 NEG
            ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG")
            svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
            framework.ExpectNoError(err)
            for _, svc := range svcList.Items {
                svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
                _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
                framework.ExpectNoError(err)
            }
            detectNegAnnotation(ctx, f, jig, gceController, ns, name, 1)

            // Remove NEG annotation entirely. Expect 0 NEGs.
            ginkgo.By("Removing NEG annotation")
            svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
            framework.ExpectNoError(err)
            for _, svc := range svcList.Items {
                delete(svc.Annotations, e2eingress.NEGAnnotation)
                // Service cannot be ClusterIP if it's using Instance Groups.
                svc.Spec.Type = v1.ServiceTypeNodePort
                _, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
                framework.ExpectNoError(err)
            }
            detectNegAnnotation(ctx, f, jig, gceController, ns, name, 0)
        })
    })
})

func detectNegAnnotation(ctx context.Context, f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
    if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) {
        svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return false, nil
        }

        // if we expect no NEGs, then we should be using IGs
        if negs == 0 {
            err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false))
            if err != nil {
                framework.Logf("ginkgo.Failed to validate IG backend service: %v", err)
                return false, nil
            }
            return true, nil
        }

        var status e2eingress.NegStatus
        v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
        if !ok {
            framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
            return false, nil
        }

        err = json.Unmarshal([]byte(v), &status)
        if err != nil {
            framework.Logf("Error in parsing Expose NEG annotation: %v", err)
            return false, nil
        }
        framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)

        if len(status.NetworkEndpointGroups) != negs {
            framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
            return false, nil
        }

        gceCloud, err := gce.GetGCECloud()
        framework.ExpectNoError(err)
        for _, neg := range status.NetworkEndpointGroups {
            networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
            framework.ExpectNoError(err)
            if len(networkEndpoints) != 1 {
                framework.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
                return false, nil
            }
        }

        err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false))
        if err != nil {
            framework.Logf("ginkgo.Failed to validate NEG backend service: %v", err)
            return false, nil
        }
        return true, nil
    }); err != nil {
        framework.ExpectNoError(err)
    }
}

var _ = common.SIGDescribe("Ingress API", func() {
    f := framework.NewDefaultFramework("ingress")
    f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
test/e2e/network/ingress_gce.go (new file, 539 lines)
@@ -0,0 +1,539 @@
|
||||
//go:build !providerless
|
||||
// +build !providerless
|
||||
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
"k8s.io/kubernetes/test/e2e/feature"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
|
||||
e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
|
||||
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
|
||||
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
"k8s.io/kubernetes/test/e2e/network/common"
|
||||
admissionapi "k8s.io/pod-security-admission/api"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
negUpdateTimeout = 2 * time.Minute
|
||||
)
|
||||
|
||||
var _ = common.SIGDescribe("Loadbalancing: L7", func() {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
var (
|
||||
ns string
|
||||
jig *e2eingress.TestJig
|
||||
conformanceTests []e2eingress.ConformanceTests
|
||||
)
|
||||
f := framework.NewDefaultFramework("ingress")
|
||||
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
|
||||
|
||||
ginkgo.BeforeEach(func(ctx context.Context) {
|
||||
jig = e2eingress.NewIngressTestJig(f.ClientSet)
|
||||
ns = f.Namespace.Name
|
||||
|
||||
// this test wants powerful permissions. Since the namespace names are unique, we can leave this
|
||||
// lying around so we don't have to race any caches
|
||||
err := e2eauth.BindClusterRole(ctx, jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name,
|
||||
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = e2eauth.WaitForAuthorizationUpdate(ctx, jig.Client.AuthorizationV1(),
|
||||
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
|
||||
"", "create", schema.GroupResource{Resource: "pods"}, true)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// Before enabling this loadbalancer test in any other test list you must
|
||||
// make sure the associated project has enough quota. At the time of this
|
||||
// writing a GCE project is allowed 3 backend services by default. This
|
||||
// test requires at least 5.
|
||||
//
|
||||
// Slow by design ~10m for each "It" block dominated by loadbalancer setup time
|
||||
// TODO: write similar tests for nginx, haproxy and AWS Ingress.
|
||||
f.Describe("GCE", framework.WithSlow(), feature.Ingress, func() {
|
||||
var gceController *gce.IngressController
|
||||
|
||||
// Platform specific setup
|
||||
ginkgo.BeforeEach(func(ctx context.Context) {
|
||||
e2eskipper.SkipUnlessProviderIs("gce", "gke")
|
||||
ginkgo.By("Initializing gce controller")
|
||||
gceController = &gce.IngressController{
|
||||
Ns: ns,
|
||||
Client: jig.Client,
|
||||
Cloud: framework.TestContext.CloudConfig,
|
||||
}
|
||||
err := gceController.Init(ctx)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// Platform specific cleanup
|
||||
ginkgo.AfterEach(func(ctx context.Context) {
|
||||
if ginkgo.CurrentSpecReport().Failed() {
|
||||
e2eingress.DescribeIng(ns)
|
||||
}
|
||||
if jig.Ingress == nil {
|
||||
ginkgo.By("No ingress created, no cleanup necessary")
|
||||
return
|
||||
}
|
||||
ginkgo.By("Deleting ingress")
|
||||
jig.TryDeleteIngress(ctx)
|
||||
|
||||
ginkgo.By("Cleaning up cloud resources")
|
||||
err := gceController.CleanupIngressController(ctx)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
ginkgo.It("should conform to Ingress spec", func(ctx context.Context) {
|
||||
conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{})
|
||||
for _, t := range conformanceTests {
|
||||
ginkgo.By(t.EntryLog)
|
||||
t.Execute()
|
||||
ginkgo.By(t.ExitLog)
|
||||
jig.WaitForIngress(ctx, true)
|
||||
}
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
f.Describe("GCE", framework.WithSlow(), feature.NEG, func() {
|
||||
var gceController *gce.IngressController
|
||||
|
||||
// Platform specific setup
|
||||
ginkgo.BeforeEach(func(ctx context.Context) {
|
||||
e2eskipper.SkipUnlessProviderIs("gce", "gke")
|
||||
ginkgo.By("Initializing gce controller")
|
||||
gceController = &gce.IngressController{
|
||||
Ns: ns,
|
||||
Client: jig.Client,
|
||||
Cloud: framework.TestContext.CloudConfig,
|
||||
}
|
||||
err := gceController.Init(ctx)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// Platform specific cleanup
|
||||
ginkgo.AfterEach(func(ctx context.Context) {
|
||||
if ginkgo.CurrentSpecReport().Failed() {
|
||||
e2eingress.DescribeIng(ns)
|
||||
}
|
||||
if jig.Ingress == nil {
|
||||
ginkgo.By("No ingress created, no cleanup necessary")
|
||||
return
|
||||
}
|
||||
ginkgo.By("Deleting ingress")
|
||||
jig.TryDeleteIngress(ctx)
|
||||
|
||||
ginkgo.By("Cleaning up cloud resources")
|
||||
err := gceController.CleanupIngressController(ctx)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
ginkgo.It("should conform to Ingress spec", func(ctx context.Context) {
|
||||
jig.PollInterval = 5 * time.Second
|
||||
conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{
|
||||
e2eingress.NEGAnnotation: `{"ingress": true}`,
|
||||
})
|
||||
for _, t := range conformanceTests {
|
||||
ginkgo.By(t.EntryLog)
|
||||
t.Execute()
|
||||
ginkgo.By(t.ExitLog)
|
||||
jig.WaitForIngress(ctx, true)
|
||||
err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("should be able to switch between IG and NEG modes", func(ctx context.Context) {
|
||||
var err error
|
||||
propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet)
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(ctx, true)
|
||||
err = gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Switch backend service to use IG")
|
||||
svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": false}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) {
|
||||
if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false)); err != nil {
|
||||
framework.Logf("ginkgo.Failed to verify IG backend service: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "Expect backend service to target IG, but failed to observe")
|
||||
jig.WaitForIngress(ctx, true)
|
||||
|
||||
ginkgo.By("Switch backend service to use NEG")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": true}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) {
|
||||
if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false)); err != nil {
|
||||
framework.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "Expect backend service to target NEG, but failed to observe")
|
||||
jig.WaitForIngress(ctx, true)
|
||||
})
|
||||
|
||||
ginkgo.It("should be able to create a ClusterIP service", func(ctx context.Context) {
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(ctx, true)
|
||||
svcPorts := jig.GetServicePorts(ctx, false)
|
||||
err := gceController.WaitForNegBackendService(ctx, svcPorts)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// ClusterIP ServicePorts have no NodePort
|
||||
for _, sp := range svcPorts {
|
||||
gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0)))
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("should sync endpoints to NEG", func(ctx context.Context) {
|
||||
name := "hostname"
|
||||
scaleAndValidateNEG := func(num int) {
|
||||
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if scale.Spec.Replicas != int32(num) {
|
||||
scale.ResourceVersion = "" // indicate the scale update should be unconditional
|
||||
scale.Spec.Replicas = int32(num)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress(ctx)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
framework.Logf("Expecting %d backends, got %d", num, res.Len())
|
||||
return res.Len() == num, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(ctx, true)
|
||||
jig.WaitForIngressToStable(ctx)
|
||||
err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
|
||||
framework.ExpectNoError(err)
|
||||
// initial replicas number is 1
|
||||
scaleAndValidateNEG(1)
|
||||
|
||||
ginkgo.By("Scale up number of backends to 5")
|
||||
scaleAndValidateNEG(5)
|
||||
|
||||
ginkgo.By("Scale down number of backends to 3")
|
||||
scaleAndValidateNEG(3)
|
||||
|
||||
ginkgo.By("Scale up number of backends to 6")
|
||||
scaleAndValidateNEG(6)
|
||||
|
||||
ginkgo.By("Scale down number of backends to 2")
|
||||
scaleAndValidateNEG(3)
|
||||
})
|
||||
|
||||
ginkgo.It("rolling update backend pods should not cause service disruption", func(ctx context.Context) {
|
||||
name := "hostname"
|
||||
replicas := 8
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(ctx, true)
|
||||
jig.WaitForIngressToStable(ctx)
|
||||
err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas))
|
||||
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
scale.ResourceVersion = "" // indicate the scale update should be unconditional
|
||||
scale.Spec.Replicas = int32(replicas)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet)
|
||||
err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress(ctx)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return res.Len() == replicas, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Trigger rolling update and observe service disruption")
|
||||
deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
// trigger by changing graceful termination period to 60 seconds
|
||||
gracePeriod := int64(60)
|
||||
deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).Update(ctx, deploy, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
|
||||
res, err := jig.GetDistinctResponseFromIngress(ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if int(deploy.Status.UpdatedReplicas) == replicas {
|
||||
if res.Len() == replicas {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
|
||||
return false, nil
|
||||
|
||||
}
|
||||
framework.Logf("Waiting for rolling update to finished. Keep sending traffic.")
|
||||
return false, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func(ctx context.Context) {
|
||||
name := "hostname"
|
||||
expectedKeys := []int32{80, 443}
|
||||
|
||||
scaleAndValidateExposedNEG := func(num int) {
|
||||
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if scale.Spec.Replicas != int32(num) {
|
||||
scale.ResourceVersion = "" // indicate the scale update should be unconditional
|
||||
scale.Spec.Replicas = int32(num)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
|
||||
svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
var status e2eingress.NegStatus
|
||||
v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
|
||||
if !ok {
|
||||
// Wait for NEG sync loop to find NEGs
|
||||
framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
|
||||
return false, nil
|
||||
}
|
||||
err = json.Unmarshal([]byte(v), &status)
|
||||
if err != nil {
|
||||
framework.Logf("Error in parsing Expose NEG annotation: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)
|
||||
|
||||
// Expect 2 NEGs to be created based on the test setup (neg-exposed)
|
||||
if len(status.NetworkEndpointGroups) != 2 {
|
||||
framework.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, port := range expectedKeys {
|
||||
if _, ok := status.NetworkEndpointGroups[port]; !ok {
|
||||
framework.Logf("Expected ServicePort key %v, but does not exist", port)
|
||||
}
|
||||
}
|
||||
|
||||
if len(status.NetworkEndpointGroups) != len(expectedKeys) {
|
||||
framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
|
||||
}
|
||||
|
||||
gceCloud, err := gce.GetGCECloud()
|
||||
framework.ExpectNoError(err)
|
||||
for _, neg := range status.NetworkEndpointGroups {
|
||||
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
|
||||
framework.ExpectNoError(err)
|
||||
if len(networkEndpoints) != num {
|
||||
framework.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
ginkgo.By("Create a basic HTTP ingress using NEG")
|
||||
jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(ctx, true)
|
||||
err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
|
||||
framework.ExpectNoError(err)
|
||||
// initial replicas number is 1
|
||||
scaleAndValidateExposedNEG(1)
|
||||
|
||||
ginkgo.By("Scale up number of backends to 5")
|
||||
scaleAndValidateExposedNEG(5)
|
||||
|
||||
ginkgo.By("Scale down number of backends to 3")
|
||||
scaleAndValidateExposedNEG(3)
|
||||
|
||||
ginkgo.By("Scale up number of backends to 6")
|
||||
scaleAndValidateExposedNEG(6)
|
||||
|
||||
ginkgo.By("Scale down number of backends to 2")
|
||||
scaleAndValidateExposedNEG(3)
|
||||
})
|
||||
|
||||
ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func(ctx context.Context) {
|
||||
ginkgo.By("Create a basic HTTP ingress using standalone NEG")
|
||||
jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
|
||||
jig.WaitForIngress(ctx, true)
|
||||
|
||||
name := "hostname"
|
||||
detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)
|
||||
|
||||
// Add Ingress annotation - NEGs should stay the same.
|
||||
ginkgo.By("Adding NEG Ingress annotation")
|
||||
svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)
|
||||
|
||||
// Modify exposed NEG annotation, but keep ingress annotation
|
||||
ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)
|
||||
|
||||
// Remove Ingress annotation. Expect 1 NEG
|
||||
ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
detectNegAnnotation(ctx, f, jig, gceController, ns, name, 1)
|
||||
|
||||
// Remove NEG annotation entirely. Expect 0 NEGs.
|
||||
ginkgo.By("Removing NEG annotation")
|
||||
svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
for _, svc := range svcList.Items {
|
||||
delete(svc.Annotations, e2eingress.NEGAnnotation)
|
||||
// Service cannot be ClusterIP if it's using Instance Groups.
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
detectNegAnnotation(ctx, f, jig, gceController, ns, name, 0)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
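// detectNegAnnotation polls the Service until its NEG status annotation reports
// the expected number of NetworkEndpointGroups, each reported NEG exists in GCE,
// and the backend service uses NEGs (or instance groups when negs == 0).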
func detectNegAnnotation(ctx context.Context, f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
	if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) {
		svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}

		// if we expect no NEGs, then we should be using IGs
		if negs == 0 {
			err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false))
			if err != nil {
				framework.Logf("Failed to validate IG backend service: %v", err)
				return false, nil
			}
			return true, nil
		}

		var status e2eingress.NegStatus
		v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
		if !ok {
			framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
			return false, nil
		}

		err = json.Unmarshal([]byte(v), &status)
		if err != nil {
			framework.Logf("Error in parsing Expose NEG annotation: %v", err)
			return false, nil
		}
		framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)

		if len(status.NetworkEndpointGroups) != negs {
			framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
			return false, nil
		}

		gceCloud, err := gce.GetGCECloud()
		framework.ExpectNoError(err)
		for _, neg := range status.NetworkEndpointGroups {
			networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
			framework.ExpectNoError(err)
			if len(networkEndpoints) != 1 {
				framework.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
				return false, nil
			}
		}

		err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false))
		if err != nil {
			framework.Logf("Failed to validate NEG backend service: %v", err)
			return false, nil
		}
		return true, nil
	}); err != nil {
		framework.ExpectNoError(err)
	}
}

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2017 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

20
test/e2e/network/scale/ingress_providerless.go
Normal file
@@ -0,0 +1,20 @@
//go:build providerless
// +build providerless

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scale
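// This file is intentionally empty beyond the package clause: it keeps
// package scale buildable when the providerless tag excludes the
// provider-backed ingress scale test sources.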

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

30
test/e2e/providers.go
Normal file
@@ -0,0 +1,30 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

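// These packages are imported only for their side effects: each provider
// package's init() registers that provider with the e2e framework.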
import (
	// ensure that cloud providers are loaded
	_ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
	_ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
)

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2017 The Kubernetes Authors.

26
test/e2e/storage/constants.go
Normal file
@@ -0,0 +1,26 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import "time"

const (
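	// minNodes is the minimum number of schedulable nodes required by the
	// storage tests that share these constants.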
	minNodes = 2

	// total time to wait for cloudprovider or file system resize to finish
	totalResizeWaitPeriod = 10 * time.Minute
)

@@ -19,9 +19,6 @@ package storage
import (
	"context"
	"fmt"
	"path"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
@@ -40,11 +37,7 @@ import (
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

const (
	// total time to wait for cloudprovider or file system resize to finish
	totalResizeWaitPeriod = 10 * time.Minute
"path"
|
||||
)

var _ = utils.SIGDescribe(feature.Flexvolumes, "Mounted flexvolume expand", framework.WithSlow(), func() {

133
test/e2e/storage/helpers.go
Normal file
@@ -0,0 +1,133 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

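// newStorageClass builds a StorageClass for a provisioning test, defaulting
// the provisioner to the current provider's in-tree plugin and applying the
// test's binding mode, fstype, mount options, and volume-expansion settings.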
func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *storagev1.StorageClass {
	pluginName := t.Provisioner
	if pluginName == "" {
		pluginName = getDefaultPluginName()
	}
	if prefix == "" {
		prefix = "sc"
	}
	bindingMode := storagev1.VolumeBindingImmediate
	if t.DelayBinding {
		bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
	}
	if t.Parameters == nil {
		t.Parameters = make(map[string]string)
	}

	if framework.NodeOSDistroIs("windows") {
		// fstype might be forced from outside, in that case skip setting a default
		if _, exists := t.Parameters["fstype"]; !exists {
			t.Parameters["fstype"] = e2epv.GetDefaultFSType()
			framework.Logf("setting a default fsType=%s in the storage class", t.Parameters["fstype"])
		}
	}

	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, ns, prefix)
	if t.AllowVolumeExpansion {
		sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
	}
	return sc
}

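// getDefaultPluginName maps the active e2e provider to its in-tree volume
// plugin name; it returns "" for providers without one.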
func getDefaultPluginName() string {
	switch {
	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
		return "kubernetes.io/gce-pd"
	case framework.ProviderIs("aws"):
		return "kubernetes.io/aws-ebs"
	case framework.ProviderIs("openstack"):
		return "kubernetes.io/cinder"
	case framework.ProviderIs("vsphere"):
		return "kubernetes.io/vsphere-volume"
	case framework.ProviderIs("azure"):
		return "kubernetes.io/azure-disk"
	}
	return ""
}

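// getStorageClass assembles the StorageClass object itself; the generated name
// combines the namespace and prefix so each test gets a unique class.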
func getStorageClass(
	provisioner string,
	parameters map[string]string,
	bindingMode *storagev1.VolumeBindingMode,
	mountOptions []string,
	ns string,
	prefix string,
) *storagev1.StorageClass {
	if bindingMode == nil {
		defaultBindingMode := storagev1.VolumeBindingImmediate
		bindingMode = &defaultBindingMode
	}
	return &storagev1.StorageClass{
		TypeMeta: v12.TypeMeta{
			Kind: "StorageClass",
		},
		ObjectMeta: v12.ObjectMeta{
			// Name must be unique, so let's base it on namespace name and the prefix (the prefix is test specific)
			GenerateName: ns + "-" + prefix,
		},
		Provisioner:       provisioner,
		Parameters:        parameters,
		VolumeBindingMode: bindingMode,
		MountOptions:      mountOptions,
	}
}

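// waitForDeploymentToRecreatePod polls the Deployment's pods until a
// replacement pod reaches Running, failing fast if a pod terminates instead.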
func waitForDeploymentToRecreatePod(ctx context.Context, client kubernetes.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
	var runningPod v1.Pod
	waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
		podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment)
		if err != nil {
			return false, fmt.Errorf("failed to get pods for deployment: %w", err)
		}
		for _, pod := range podList.Items {
			switch pod.Status.Phase {
			case v1.PodRunning:
				runningPod = pod
				return true, nil
			case v1.PodFailed, v1.PodSucceeded:
				return false, conditions.ErrPodCompleted
			}
		}
		return false, nil
	})
	if waitErr != nil {
		return runningPod, fmt.Errorf("error waiting for recreated pod: %v", waitErr)
	}
	return runningPod, nil
}

@@ -18,12 +18,8 @@ package storage

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
@@ -31,9 +27,7 @@ import (
	admissionapi "k8s.io/pod-security-admission/api"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
@@ -168,27 +162,3 @@ var _ = utils.SIGDescribe("Mounted volume expand", feature.StorageProvider, func
	gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
	})
})

func waitForDeploymentToRecreatePod(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
	var runningPod v1.Pod
	waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
		podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment)
		if err != nil {
			return false, fmt.Errorf("failed to get pods for deployment: %w", err)
		}
		for _, pod := range podList.Items {
			switch pod.Status.Phase {
			case v1.PodRunning:
				runningPod = pod
				return true, nil
			case v1.PodFailed, v1.PodSucceeded:
				return false, conditions.ErrPodCompleted
			}
		}
		return false, nil
	})
	if waitErr != nil {
		return runningPod, fmt.Errorf("error waiting for recreated pod: %v", waitErr)
	}
	return runningPod, nil
}

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2015 The Kubernetes Authors.

@@ -55,7 +58,6 @@ const (
	nodeStatusTimeout  = 10 * time.Minute
	nodeStatusPollTime = 1 * time.Second
	podEvictTimeout    = 2 * time.Minute
	minNodes           = 2
)

var _ = utils.SIGDescribe("Pod Disks", feature.StorageProvider, func() {

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2017 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.

@@ -27,7 +30,6 @@ import (

	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -721,80 +723,6 @@ func updateDefaultStorageClass(ctx context.Context, c clientset.Interface, scNam
	verifyDefaultStorageClass(ctx, c, scName, expectedDefault)
}

func getDefaultPluginName() string {
	switch {
	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
		return "kubernetes.io/gce-pd"
	case framework.ProviderIs("aws"):
		return "kubernetes.io/aws-ebs"
	case framework.ProviderIs("openstack"):
		return "kubernetes.io/cinder"
	case framework.ProviderIs("vsphere"):
		return "kubernetes.io/vsphere-volume"
	case framework.ProviderIs("azure"):
		return "kubernetes.io/azure-disk"
	}
	return ""
}

func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *storagev1.StorageClass {
	pluginName := t.Provisioner
	if pluginName == "" {
		pluginName = getDefaultPluginName()
	}
	if prefix == "" {
		prefix = "sc"
	}
	bindingMode := storagev1.VolumeBindingImmediate
	if t.DelayBinding {
		bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
	}
	if t.Parameters == nil {
		t.Parameters = make(map[string]string)
	}

	if framework.NodeOSDistroIs("windows") {
		// fstype might be forced from outside, in that case skip setting a default
		if _, exists := t.Parameters["fstype"]; !exists {
			t.Parameters["fstype"] = e2epv.GetDefaultFSType()
			framework.Logf("setting a default fsType=%s in the storage class", t.Parameters["fstype"])
		}
	}

	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, ns, prefix)
	if t.AllowVolumeExpansion {
		sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
	}
	return sc
}

func getStorageClass(
	provisioner string,
	parameters map[string]string,
	bindingMode *storagev1.VolumeBindingMode,
	mountOptions []string,
	ns string,
	prefix string,
) *storagev1.StorageClass {
	if bindingMode == nil {
		defaultBindingMode := storagev1.VolumeBindingImmediate
		bindingMode = &defaultBindingMode
	}
	return &storagev1.StorageClass{
		TypeMeta: metav1.TypeMeta{
			Kind: "StorageClass",
		},
		ObjectMeta: metav1.ObjectMeta{
			// Name must be unique, so let's base it on namespace name and the prefix (the prefix is test specific)
			GenerateName: ns + "-" + prefix,
		},
		Provisioner:       provisioner,
		Parameters:        parameters,
		VolumeBindingMode: bindingMode,
		MountOptions:      mountOptions,
	}
}

// waitForProvisionedVolumesDeleted is a polling wrapper to scan all PersistentVolumes for any associated to the test's
// StorageClass. Returns the remaining PVs associated with that StorageClass, or an error.
func waitForProvisionedVolumesDeleted(ctx context.Context, c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) {

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

@@ -1,3 +1,6 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.