switch to e2 machine types
@@ -27,10 +27,7 @@ function get-num-nodes {
 #   NUM_NODES
 #   NUM_WINDOWS_NODES
 function get-master-size {
-  local suggested_master_size=1
-  if [[ "$(get-num-nodes)" -gt "5" ]]; then
-    suggested_master_size=2
-  fi
+  local suggested_master_size=2
   if [[ "$(get-num-nodes)" -gt "10" ]]; then
     suggested_master_size=4
   fi
@@ -43,9 +40,6 @@ function get-master-size {
   if [[ "$(get-num-nodes)" -gt "500" ]]; then
     suggested_master_size=32
   fi
-  if [[ "$(get-num-nodes)" -gt "2000" ]]; then
-    suggested_master_size=64
-  fi
   echo "${suggested_master_size}"
 }
 
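For orientation, this is roughly what get-master-size looks like once the two hunks above are applied. This is only a sketch reassembled from the diff: the thresholds between 10 and 500 nodes are not shown here and are stood in for by a comment. The smallest e2-standard shape is e2-standard-2 and the largest is e2-standard-32, which is presumably why the 1-vCPU default and the 64-vCPU step disappear.

function get-master-size {
  # e2-standard has no 1-vCPU size, so the baseline default is now 2.
  local suggested_master_size=2
  if [[ "$(get-num-nodes)" -gt "10" ]]; then
    suggested_master_size=4
  fi
  # ... intermediate thresholds not shown in this diff ...
  if [[ "$(get-num-nodes)" -gt "500" ]]; then
    suggested_master_size=32
  fi
  # The former >2000-node step to 64 vCPUs is dropped; e2-standard tops out at 32 vCPUs.
  echo "${suggested_master_size}"
}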
@@ -27,12 +27,10 @@ ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 export REGION=${ZONE%-*}
 RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
 REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
-# TODO: Migrate to e2-standard machine family.
-NODE_SIZE=${NODE_SIZE:-n1-standard-2}
+NODE_SIZE=${NODE_SIZE:-e2-standard-2}
 NUM_NODES=${NUM_NODES:-3}
 NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
-# TODO: Migrate to e2-standard machine family.
-MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
+MASTER_SIZE=${MASTER_SIZE:-e2-standard-$(get-master-size)}
 MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
 export MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
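The defaults resolve the same way as before, only with the e2 family. A minimal sketch of how they expand for an out-of-the-box cluster (NUM_NODES=3, so get-master-size returns 2); the kube-up invocation at the end is a hypothetical example of overriding them, not something from this commit:

# With no overrides, the defaults now expand to:
#   NODE_SIZE   -> e2-standard-2
#   MASTER_SIZE -> e2-standard-$(get-master-size)  ->  e2-standard-2 for NUM_NODES=3
# Explicit settings still win, e.g. (hypothetical invocation):
NODE_SIZE=e2-standard-4 MASTER_SIZE=e2-standard-8 NUM_NODES=10 ./cluster/kube-up.sh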
@@ -26,12 +26,10 @@ ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 export REGION=${ZONE%-*}
 RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
 REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
-# TODO: Migrate to e2-standard machine family.
-NODE_SIZE=${NODE_SIZE:-n1-standard-2}
+NODE_SIZE=${NODE_SIZE:-e2-standard-2}
 NUM_NODES=${NUM_NODES:-3}
 NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
-# TODO: Migrate to e2-standard machine family.
-MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
+MASTER_SIZE=${MASTER_SIZE:-e2-standard-$(get-master-size)}
 MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
 export MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
@@ -33,7 +33,7 @@ skip=${SKIP-"\[Flaky\]|\[Slow\]|\[Serial\]"}
 # The number of tests that can run in parallel depends on what tests
 # are running and on the size of the node. Too many, and tests will
 # fail due to resource contention. 8 is a reasonable default for a
-# n1-standard-1 node.
+# e2-standard-2 node.
 # Currently, parallelism only affects when REMOTE=true. For local test,
 # ginkgo default parallelism (cores - 1) is used.
 parallelism=${PARALLELISM:-8}
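PARALLELISM is read straight from the environment on the last line above, so it can be tuned to the node shape. A hedged example; the make target name is assumed from the usual node-e2e workflow and is not part of this diff:

# Run remote node e2e with fewer parallel specs, e.g. on a smaller node:
PARALLELISM=4 REMOTE=true make test-e2e-node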
@@ -375,9 +375,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gke")
 
-		ginkgo.By("Creating new node-pool with n1-standard-4 machines")
+		ginkgo.By("Creating new node-pool with e2-standard-4 machines")
 		const extraPoolName = "extra-pool"
-		addNodePool(extraPoolName, "n1-standard-4", 1)
+		addNodePool(extraPoolName, "e2-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
 		extraNodes := getPoolInitialSize(extraPoolName)
 		framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))
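addNodePool and deleteNodePool are GKE-only test helpers whose bodies are not shown in this diff; on GKE they correspond roughly to the following gcloud calls. This is a sketch only; the cluster and zone values are hypothetical placeholders:

# Create the extra pool the test asks for:
gcloud container node-pools create extra-pool \
  --cluster="${CLUSTER_NAME}" \
  --zone="${ZONE}" \
  --machine-type=e2-standard-4 \
  --num-nodes=1

# ...and the deferred cleanup:
gcloud container node-pools delete extra-pool \
  --cluster="${CLUSTER_NAME}" \
  --zone="${ZONE}" \
  --quiet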
@@ -409,9 +409,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gke")
 
-		ginkgo.By("Creating new node-pool with n1-standard-4 machines")
+		ginkgo.By("Creating new node-pool with e2-standard-4 machines")
 		const extraPoolName = "extra-pool"
-		addNodePool(extraPoolName, "n1-standard-4", 1)
+		addNodePool(extraPoolName, "e2-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
 		extraNodes := getPoolInitialSize(extraPoolName)
 		framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))
@@ -641,9 +641,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
 		e2eskipper.SkipUnlessProviderIs("gke")
 
-		ginkgo.By("Creating new node-pool with n1-standard-4 machines")
+		ginkgo.By("Creating new node-pool with e2-standard-4 machines")
 		const extraPoolName = "extra-pool"
-		addNodePool(extraPoolName, "n1-standard-4", 1)
+		addNodePool(extraPoolName, "e2-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
 		extraNodes := getPoolInitialSize(extraPoolName)
 		framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))
@@ -697,7 +697,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes)
 
 		const extraPoolName = "extra-pool"
-		addNodePool(extraPoolName, "n1-standard-1", 3)
+		addNodePool(extraPoolName, "e2-standard-2", 3)
 		defer deleteNodePool(extraPoolName)
 		extraNodes := getPoolInitialSize(extraPoolName)
 
@@ -753,7 +753,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			// GKE-specific setup
 			ginkgo.By("Add a new node pool with 0 nodes and min size 0")
 			const extraPoolName = "extra-pool"
-			addNodePool(extraPoolName, "n1-standard-4", 0)
+			addNodePool(extraPoolName, "e2-standard-4", 0)
 			defer deleteNodePool(extraPoolName)
 			framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
 			defer disableAutoscaler(extraPoolName, 0, 1)
@@ -813,7 +813,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		// GKE-specific setup
 		ginkgo.By("Add a new node pool with size 1 and min size 0")
 		const extraPoolName = "extra-pool"
-		addNodePool(extraPoolName, "n1-standard-4", 1)
+		addNodePool(extraPoolName, "e2-standard-4", 1)
 		defer deleteNodePool(extraPoolName)
 		extraNodes := getPoolInitialSize(extraPoolName)
 		framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))
@@ -85,8 +85,8 @@ func init() {
 }
 
 const (
-	defaultGCEMachine             = "n1-standard-1"
-	acceleratorTypeResourceFormat = "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/acceleratorTypes/%s"
+	defaultGCEMachine             = "e2-standard-2"
+	acceleratorTypeResourceFormat = "https://www.googleapis.com/compute/beta/projects/%s/zones/%s/acceleratorTypes/%s"
 )
 
 type GCERunner struct {
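For reference, acceleratorTypeResourceFormat is a format string filled in with a project, a zone, and an accelerator type. The values below are hypothetical and only show the resulting URL shape under the new "beta" path:

# Hypothetical values:
project=my-project
zone=us-central1-b
accel=nvidia-tesla-t4
printf 'https://www.googleapis.com/compute/beta/projects/%s/zones/%s/acceleratorTypes/%s\n' \
  "$project" "$zone" "$accel"
# -> https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-b/acceleratorTypes/nvidia-tesla-t4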