Remove gcp in-tree cloud provider and credential provider
Signed-off-by: Davanum Srinivas <davanum@gmail.com>
@@ -1,157 +0,0 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipamperf

import (
	"context"
	"net"
	"sync"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
	beta "google.golang.org/api/compute/v0.beta"
	ga "google.golang.org/api/compute/v1"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
	"k8s.io/kubernetes/test/integration/util"
)

// implementation note:
// ------------------
// cloud.go implements hooks and handler functions for the MockGCE cloud in order to meet expectations
// of cloud behavior from the IPAM controllers. The key constraint is that the IPAM code is spread
// across both GA and Beta instances, which are distinct objects in the mock. We need to solve for
//
// 1. When a GET is called on an instance, we lazily create the instance with or without an assigned
//    ip alias as needed by the IPAM controller type
// 2. When we assign an IP alias for an instance, both the GA and Beta instance have to agree on the
//    assigned alias range
//
// We solve both problems by using a baseInstanceList which maintains a list of known instances,
// and their pre-assigned ip-alias ranges (if needed). We then create GetHooks for GA and Beta GetInstance
// calls as closures over this baseInstanceList that can look up base instance data.
//
// This has the advantage that once the Get hook populates the GCEMock with the base data, we then let the
// rest of the mock code run as is.

// baseInstance tracks basic instance data needed by the IPAM controllers
type baseInstance struct {
	name       string
	zone       string
	aliasRange string
}

// baseInstanceList tracks a set of base instances
type baseInstanceList struct {
	allocateCIDR   bool
	clusterCIDR    *net.IPNet
	subnetMaskSize int
	cidrSet        *cidrset.CidrSet

	lock      sync.Mutex // protect access to instances
	instances map[meta.Key]*baseInstance
}

// toGA is a utility method to return the baseInstance data as a GA Instance object
func (bi *baseInstance) toGA() *ga.Instance {
	inst := &ga.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*ga.NetworkInterface{{}}}
	if bi.aliasRange != "" {
		inst.NetworkInterfaces[0].AliasIpRanges = []*ga.AliasIpRange{
			{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
		}
	}
	return inst
}

// toBeta is a utility method to return the baseInstance data as a beta Instance object
func (bi *baseInstance) toBeta() *beta.Instance {
	inst := &beta.Instance{Name: bi.name, Zone: bi.zone, NetworkInterfaces: []*beta.NetworkInterface{{}}}
	if bi.aliasRange != "" {
		inst.NetworkInterfaces[0].AliasIpRanges = []*beta.AliasIpRange{
			{IpCidrRange: bi.aliasRange, SubnetworkRangeName: util.TestSecondaryRangeName},
		}
	}
	return inst
}

// newBaseInstanceList is the baseInstanceList constructor
func newBaseInstanceList(allocateCIDR bool, clusterCIDR *net.IPNet, subnetMaskSize int) *baseInstanceList {
	cidrSet, _ := cidrset.NewCIDRSet(clusterCIDR, subnetMaskSize)
	return &baseInstanceList{
		allocateCIDR:   allocateCIDR,
		clusterCIDR:    clusterCIDR,
		subnetMaskSize: subnetMaskSize,
		cidrSet:        cidrSet,
		instances:      make(map[meta.Key]*baseInstance),
	}
}

// getOrCreateBaseInstance lazily creates a new base instance, assigning an alias range if allocateCIDR is true
func (bil *baseInstanceList) getOrCreateBaseInstance(key *meta.Key) *baseInstance {
	bil.lock.Lock()
	defer bil.lock.Unlock()

	inst, found := bil.instances[*key]
	if !found {
		inst = &baseInstance{name: key.Name, zone: key.Zone}
		if bil.allocateCIDR {
			nextRange, _ := bil.cidrSet.AllocateNext()
			inst.aliasRange = nextRange.String()
		}
		bil.instances[*key] = inst
	}
	return inst
}

// newGAGetHook creates a new closure with the current baseInstanceList to be used as a MockInstances.GetHook
func (bil *baseInstanceList) newGAGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
	return func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *ga.Instance, error) {
		m.Lock.Lock()
		defer m.Lock.Unlock()

		if _, found := m.Objects[*key]; !found {
			m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toGA()}
		}
		return false, nil, nil
	}
}

// newBetaGetHook creates a new closure with the current baseInstanceList to be used as a MockBetaInstances.GetHook
func (bil *baseInstanceList) newBetaGetHook() func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
	return func(ctx context.Context, key *meta.Key, m *cloud.MockBetaInstances) (bool, *beta.Instance, error) {
		m.Lock.Lock()
		defer m.Lock.Unlock()

		if _, found := m.Objects[*key]; !found {
			m.Objects[*key] = &cloud.MockInstancesObj{Obj: bil.getOrCreateBaseInstance(key).toBeta()}
		}
		return false, nil, nil
	}
}

// newMockCloud returns a mock GCE cloud with the appropriate handler hooks
func (bil *baseInstanceList) newMockCloud() cloud.Cloud {
	c := cloud.NewMockGCE(nil)

	// insert hooks to lazily create an instance when needed
	c.MockInstances.GetHook = bil.newGAGetHook()
	c.MockBetaInstances.GetHook = bil.newBetaGetHook()

	return c
}
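For reference, a minimal sketch of how the lazy-create hooks above behave. This is not part of the commit: it assumes it sits in the same ipamperf package, and the instance name and zone are invented for illustration.

package ipamperf

import (
	"context"
	"fmt"

	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
	netutils "k8s.io/utils/net"
)

// exampleLazyGet shows that the first GA Get for an unknown key lazily
// creates the instance, with an alias range pre-assigned because
// allocateCIDR is true.
func exampleLazyGet() error {
	_, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.96.0.0/11")
	bil := newBaseInstanceList(true, clusterCIDR, 24)
	mock := bil.newMockCloud()

	// The GetHook seeds m.Objects and returns (false, nil, nil), so the
	// stock mock code then serves the Get from that map.
	inst, err := mock.Instances().Get(context.Background(), meta.ZonalKey("node-1", "us-central1-b"))
	if err != nil {
		return err
	}
	// A Beta Get for the same key would report the same range, since both
	// hooks close over the same baseInstanceList.
	fmt.Println(inst.NetworkInterfaces[0].AliasIpRanges[0].IpCidrRange)
	return nil
}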
@@ -1,179 +0,0 @@
//go:build !providerless
// +build !providerless

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipamperf

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"os"
	"testing"
	"time"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
	netutils "k8s.io/utils/net"

	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
	"k8s.io/kubernetes/pkg/controller/nodeipam"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/kubernetes/test/integration/util"
)

func setupAllocator(ctx context.Context, kubeConfig *restclient.Config, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*clientset.Clientset, util.ShutdownFunc, error) {
	controllerStopChan := make(chan struct{})
	shutdownFunc := func() {
		close(controllerStopChan)
	}

	clientConfig := restclient.CopyConfig(kubeConfig)
	clientConfig.QPS = float32(config.KubeQPS)
	clientConfig.Burst = config.KubeQPS
	clientSet := clientset.NewForConfigOrDie(clientConfig)

	sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour)
	ipamController, err := nodeipam.NewNodeIpamController(
		ctx,
		sharedInformer.Core().V1().Nodes(),
		config.Cloud, clientSet, []*net.IPNet{clusterCIDR}, serviceCIDR, nil,
		[]int{subnetMaskSize}, config.AllocatorType,
	)
	if err != nil {
		return nil, shutdownFunc, err
	}
	go ipamController.Run(ctx)
	sharedInformer.Start(controllerStopChan)

	return clientSet, shutdownFunc, nil
}

func runTest(t *testing.T, kubeConfig *restclient.Config, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*Results, error) {
	t.Helper()
	klog.Infof("Running test %s", t.Name())

	nodeClientConfig := restclient.CopyConfig(kubeConfig)
	nodeClientConfig.QPS = float32(config.CreateQPS)
	nodeClientConfig.Burst = config.CreateQPS
	nodeClient := clientset.NewForConfigOrDie(nodeClientConfig)

	defer deleteNodes(nodeClient) // clean up nodes after controller shutdown

	_, ctx := ktesting.NewTestContext(t)
	clientSet, shutdownFunc, err := setupAllocator(ctx, kubeConfig, config, clusterCIDR, serviceCIDR, subnetMaskSize)
	if err != nil {
		t.Fatalf("Error starting IPAM allocator: %v", err)
	}
	defer shutdownFunc()

	o := NewObserver(clientSet, config.NumNodes)
	if err := o.StartObserving(); err != nil {
		t.Fatalf("Could not start test observer: %v", err)
	}

	if err := createNodes(nodeClient, config); err != nil {
		t.Fatalf("Could not create nodes: %v", err)
	}

	results := o.Results(t.Name(), config)
	klog.Infof("Results: %s", results)
	if !results.Succeeded {
		t.Errorf("%s: Not all allocations succeeded", t.Name())
	}
	return results, nil
}

func logResults(allResults []*Results) {
	jStr, err := json.MarshalIndent(allResults, "", "  ")
	if err != nil {
		klog.Errorf("Error formatting results: %v", err)
		return
	}
	if resultsLogFile != "" {
		klog.Infof("Logging results to %s", resultsLogFile)
		if err := os.WriteFile(resultsLogFile, jStr, os.FileMode(0644)); err != nil {
			klog.Errorf("Error logging results to %s: %v", resultsLogFile, err)
		}
	}
	klog.Infof("AllResults:\n%s", string(jStr))
}

func TestPerformance(t *testing.T) {
	// TODO (#93112) skip test until appropriate timeout established
	if testing.Short() || true {
		// TODO (#61854) find why flakiness is caused by etcd connectivity before enabling always
		t.Skip("Skipping because we want to run short tests")
	}

	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
			// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
			opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
		},
	})
	defer tearDownFn()

	_, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.96.0.0/11") // allows up to 8K nodes
	_, serviceCIDR, _ := netutils.ParseCIDRSloppy("10.94.0.0/24") // does not matter for the test - allows up to ~250 services
	subnetMaskSize := 24

	var (
		allResults []*Results
		tests      []*Config
	)

	if isCustom {
		tests = append(tests, customConfig)
	} else {
		for _, numNodes := range []int{10, 100} {
			for _, alloc := range []ipam.CIDRAllocatorType{ipam.RangeAllocatorType, ipam.CloudAllocatorType, ipam.IPAMFromClusterAllocatorType, ipam.IPAMFromCloudAllocatorType} {
				tests = append(tests, &Config{AllocatorType: alloc, NumNodes: numNodes, CreateQPS: numNodes, KubeQPS: 10, CloudQPS: 10})
			}
		}
	}

	for _, test := range tests {
		testName := fmt.Sprintf("%s-KubeQPS%d-Nodes%d", test.AllocatorType, test.KubeQPS, test.NumNodes)
		t.Run(testName, func(t *testing.T) {
			allocateCIDR := false
			if test.AllocatorType == ipam.IPAMFromCloudAllocatorType || test.AllocatorType == ipam.CloudAllocatorType {
				allocateCIDR = true
			}
			bil := newBaseInstanceList(allocateCIDR, clusterCIDR, subnetMaskSize)
			cloud, err := util.NewMockGCECloud(bil.newMockCloud())
			if err != nil {
				t.Fatalf("Unable to create mock cloud: %v", err)
			}
			test.Cloud = cloud
			if results, err := runTest(t, kubeConfig, test, clusterCIDR, serviceCIDR, subnetMaskSize); err == nil {
				allResults = append(allResults, results)
			}
		})
	}

	logResults(allResults)
}
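The "up to 8K nodes" comment in TestPerformance follows from the CIDR arithmetic: a /11 cluster CIDR carved into /24 node ranges yields 2^(24-11) = 8192 allocatable pod CIDRs. A small standalone sketch (not part of the commit) checking this with the same cidrset package the mock uses:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
	netutils "k8s.io/utils/net"
)

func main() {
	_, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.96.0.0/11")
	set, err := cidrset.NewCIDRSet(clusterCIDR, 24)
	if err != nil {
		panic(err)
	}
	// Allocations proceed sequentially from the start of the range, so the
	// first node CIDR should be 10.96.0.0/24; 8191 more follow before the
	// set is exhausted.
	first, _ := set.AllocateNext()
	fmt.Println(first)
}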