fix: enable testifylint on pkg/controller
			Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
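The hunks below follow testifylint's require-error rule: error checks that the rest of a test depends on move from assert to require, so a failed check aborts the test instead of letting it continue with bad state. A minimal sketch of the difference, with an invented test and values (not taken from this patch):

```go
package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRequireVsAssert(t *testing.T) {
	n, err := strconv.Atoi("42")

	// assert.NoError records a failure but lets the test keep running,
	// so later assertions would still execute against a zero value.
	assert.NoError(t, err)

	// require.NoError calls t.FailNow on failure and stops the test here;
	// this is what testifylint's require-error check asks for when the
	// rest of the test cannot make sense after the error.
	require.NoError(t, err)

	assert.Equal(t, 42, n)
}
```

When the value under test is only meaningful if the error is nil, require is the safer default; assert stays appropriate for independent checks that should all be reported in a single run.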
@@ -58,6 +58,7 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 // NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
@@ -183,7 +184,7 @@ func TestControllerExpectations(t *testing.T) {
 
 	// RC fires off adds and deletes at apiserver, then sets expectations
 	rcKey, err := KeyFunc(rc)
-	assert.NoError(t, err, "Couldn't get key for object %#v: %v", rc, err)
+	require.NoError(t, err, "Couldn't get key for object %#v: %v", rc, err)
 
 	e.SetExpectations(logger, rcKey, adds, dels)
 	var wg sync.WaitGroup
@@ -238,7 +239,7 @@ func TestControllerExpectations(t *testing.T) {
 				e.SetExpectations(logger, rcKey, test.expectationsToSet[0], test.expectationsToSet[1])
 			}
 			podExp, exists, err := e.GetExpectations(rcKey)
-			assert.NoError(t, err, "Could not get expectations for rc, exists %v and err %v", exists, err)
+			require.NoError(t, err, "Could not get expectations for rc, exists %v and err %v", exists, err)
 			assert.True(t, exists, "Could not get expectations for rc, exists %v and err %v", exists, err)
 
 			add, del := podExp.GetExpectations()
@@ -382,12 +383,12 @@ func TestCreatePodsWithGenerateName(t *testing.T) {
 			}
 
 			err := test.podCreationFunc(podControl)
-			assert.NoError(t, err, "unexpected error: %v", err)
+			require.NoError(t, err, "unexpected error: %v", err)
 
 			fakeHandler.ValidateRequest(t, "/api/v1/namespaces/default/pods", "POST", nil)
 			var actualPod = &v1.Pod{}
 			err = json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
-			assert.NoError(t, err, "unexpected error: %v", err)
+			require.NoError(t, err, "unexpected error: %v", err)
 			assert.True(t, apiequality.Semantic.DeepDerivative(test.wantPod, actualPod),
 				"Body: %s", fakeHandler.RequestBody)
 		})
@@ -426,10 +427,10 @@ func TestCountTerminatingPods(t *testing.T) {
 
 	terminatingPods := CountTerminatingPods(podPointers)
 
-	assert.Equal(t, terminatingPods, int32(2))
+	assert.Equal(t, int32(2), terminatingPods)
 
 	terminatingList := FilterTerminatingPods(podPointers)
-	assert.Equal(t, len(terminatingList), int(2))
+	assert.Len(t, terminatingList, int(2))
 }
 
 func TestActivePodFiltering(t *testing.T) {
@@ -957,7 +958,7 @@ func TestRemoveTaintOffNode(t *testing.T) {
 	for _, test := range tests {
 		node, _ := test.nodeHandler.Get(context.TODO(), test.nodeName, metav1.GetOptions{})
 		err := RemoveTaintOffNode(context.TODO(), test.nodeHandler, test.nodeName, node, test.taintsToRemove...)
-		assert.NoError(t, err, "%s: RemoveTaintOffNode() error = %v", test.name, err)
+		require.NoError(t, err, "%s: RemoveTaintOffNode() error = %v", test.name, err)
 
 		node, _ = test.nodeHandler.Get(context.TODO(), test.nodeName, metav1.GetOptions{})
 		assert.EqualValues(t, test.expectedTaints, node.Spec.Taints,
@@ -1196,7 +1197,7 @@ func TestAddOrUpdateTaintOnNode(t *testing.T) {
 			assert.Equal(t, test.expectedErr, err, "AddOrUpdateTaintOnNode get unexpected error")
 			continue
 		}
-		assert.NoError(t, err, "%s: AddOrUpdateTaintOnNode() error = %v", test.name, err)
+		require.NoError(t, err, "%s: AddOrUpdateTaintOnNode() error = %v", test.name, err)
 
 		node, _ := test.nodeHandler.Get(context.TODO(), test.nodeName, metav1.GetOptions{})
 		assert.EqualValues(t, test.expectedTaints, node.Spec.Taints,

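The TestCountTerminatingPods hunk above also picks up testifylint's expected-actual and len rules: testify's Equal puts the expected value first and the actual value second, and length comparisons read better through assert.Len. A small illustration with made-up data, not part of this patch:

```go
package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestArgumentOrder(t *testing.T) {
	// expected-actual: the expected value goes first and the computed value
	// second, so failure output labels the two sides correctly.
	got := strings.ToUpper("ok")
	assert.Equal(t, "OK", got)

	// len: prefer assert.Len over comparing len(...) with assert.Equal.
	items := []string{"a", "b"} // invented data for illustration
	assert.Len(t, items, 2)
}
```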
@@ -194,7 +194,7 @@ func TestSyncServiceNoSelector(t *testing.T) {
 
 	logger, _ := ktesting.NewTestContext(t)
 	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Empty(t, client.Actions())
 }
 
@@ -255,17 +255,17 @@ func TestServiceExternalNameTypeSync(t *testing.T) {
 
 			pod := newPod(1, namespace, true, 0, false)
 			err := esController.podStore.Add(pod)
-			assert.NoError(t, err)
+			require.NoError(t, err)
 
 			err = esController.serviceStore.Add(tc.service)
-			assert.NoError(t, err)
+			require.NoError(t, err)
 
 			err = esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
-			assert.NoError(t, err)
+			require.NoError(t, err)
 			assert.Empty(t, client.Actions())
 
 			sliceList, err := client.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
-			assert.NoError(t, err)
+			require.NoError(t, err)
 			assert.Empty(t, sliceList.Items, "Expected 0 endpoint slices")
 		})
 	}
@@ -287,7 +287,7 @@ func TestSyncServicePendingDeletion(t *testing.T) {
 
 	logger, _ := ktesting.NewTestContext(t)
 	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Empty(t, client.Actions())
 }
 
@@ -300,7 +300,7 @@ func TestSyncServiceWithSelector(t *testing.T) {
 	expectActions(t, client.Actions(), 1, "create", "endpointslices")
 
 	sliceList, err := client.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
-	assert.Nil(t, err, "Expected no error fetching endpoint slices")
+	require.NoError(t, err, "Expected no error fetching endpoint slices")
 	assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices")
 	slice := sliceList.Items[0]
 	assert.Regexp(t, "^"+serviceName, slice.Name)
@@ -338,7 +338,7 @@ func TestSyncServiceMissing(t *testing.T) {
 	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, missingServiceName))
 
 	// nil should be returned when the service doesn't exist
-	assert.Nil(t, err, "Expected no error syncing service")
+	require.NoError(t, err, "Expected no error syncing service")
 
 	// That should mean no client actions were performed
 	assert.Empty(t, client.Actions())
@@ -368,13 +368,13 @@ func TestSyncServicePodSelection(t *testing.T) {
 
 	// an endpoint slice should be created, it should only reference pod1 (not pod2)
 	slices, err := client.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
-	assert.Nil(t, err, "Expected no error fetching endpoint slices")
+	require.NoError(t, err, "Expected no error fetching endpoint slices")
 	assert.Len(t, slices.Items, 1, "Expected 1 endpoint slices")
 	slice := slices.Items[0]
 	assert.Len(t, slice.Endpoints, 1, "Expected 1 endpoint in first slice")
 	assert.NotEmpty(t, slice.Annotations[v1.EndpointsLastChangeTriggerTime])
 	endpoint := slice.Endpoints[0]
-	assert.EqualValues(t, endpoint.TargetRef, &v1.ObjectReference{Kind: "Pod", Namespace: ns, Name: pod1.Name})
+	assert.EqualValues(t, &v1.ObjectReference{Kind: "Pod", Namespace: ns, Name: pod1.Name}, endpoint.TargetRef)
 }
 
 func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
@@ -384,7 +384,7 @@ func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
 	service := createService(t, esController, ns, serviceName)
 	logger, _ := ktesting.NewTestContext(t)
 	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
-	assert.Nil(t, err, "Expected no error syncing service")
+	require.NoError(t, err, "Expected no error syncing service")
 
 	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
 	ownerRef := metav1.NewControllerRef(service, gvk)
@@ -415,7 +415,7 @@ func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
 	logger, _ = ktesting.NewTestContext(t)
 	numActionsBefore := len(client.Actions())
 	err = esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
-	assert.Nil(t, err, "Expected no error syncing service")
+	require.NoError(t, err, "Expected no error syncing service")
 
 	// The EndpointSlice marked for deletion should be ignored by the controller, and thus
 	// should not result in any action.
@@ -505,7 +505,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 	numActionsBefore := len(client.Actions())
 	logger, _ := ktesting.NewTestContext(t)
 	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
-	assert.Nil(t, err, "Expected no error syncing service")
+	require.NoError(t, err, "Expected no error syncing service")
 
 	if len(client.Actions()) != numActionsBefore+2 {
 		t.Errorf("Expected 2 more actions, got %d", len(client.Actions())-numActionsBefore)
@@ -1292,16 +1292,16 @@ func TestSyncService(t *testing.T) {
 			esController.serviceStore.Add(testcase.service)
 
 			_, err := esController.client.CoreV1().Services(testcase.service.Namespace).Create(context.TODO(), testcase.service, metav1.CreateOptions{})
-			assert.Nil(t, err, "Expected no error creating service")
+			require.NoError(t, err, "Expected no error creating service")
 
 			logger, _ := ktesting.NewTestContext(t)
 			err = esController.syncService(logger, fmt.Sprintf("%s/%s", testcase.service.Namespace, testcase.service.Name))
-			assert.Nil(t, err)
+			require.NoError(t, err)
 
 			// last action should be to create endpoint slice
 			expectActions(t, client.Actions(), 1, "create", "endpointslices")
 			sliceList, err := client.DiscoveryV1().EndpointSlices(testcase.service.Namespace).List(context.TODO(), metav1.ListOptions{})
-			assert.Nil(t, err, "Expected no error fetching endpoint slices")
+			require.NoError(t, err, "Expected no error fetching endpoint slices")
 			assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices")
 
 			// ensure all attributes of endpoint slice match expected state
@@ -1684,7 +1684,7 @@ func TestPodDeleteBatching(t *testing.T) {
 				time.Sleep(update.delay)
 
 				old, exists, err := esController.podStore.GetByKey(fmt.Sprintf("%s/%s", ns, update.podName))
-				assert.Nil(t, err, "error while retrieving old value of %q: %v", update.podName, err)
+				require.NoError(t, err, "error while retrieving old value of %q: %v", update.podName, err)
 				assert.True(t, exists, "pod should exist")
 				esController.podStore.Delete(old)
 				esController.deletePod(old)
@@ -2050,7 +2050,7 @@ func standardSyncService(t *testing.T, esController *endpointSliceController, na
 
 	logger, _ := ktesting.NewTestContext(t)
 	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
-	assert.Nil(t, err, "Expected no error syncing service")
+	require.NoError(t, err, "Expected no error syncing service")
 }
 
 func createService(t *testing.T, esController *endpointSliceController, namespace, serviceName string) *v1.Service {
@@ -2070,7 +2070,7 @@ func createService(t *testing.T, esController *endpointSliceController, namespac
 	}
 	esController.serviceStore.Add(service)
 	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
-	assert.Nil(t, err, "Expected no error creating service")
+	require.NoError(t, err, "Expected no error creating service")
 	return service
 }

@@ -757,7 +757,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 					tc.expectedDesiredReplicas,
 					(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.specReplicas), obj.Message)
 			default:
-				assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
+				assert.False(t, true, "Unexpected event: %s / %s", obj.Reason, obj.Message)
 			}
 		}
 		tc.eventCreated = true
@@ -3863,7 +3863,7 @@ func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) {
 			},
 		},
 	})
-	assert.Equal(t, calculated, int32(2))
+	assert.Equal(t, int32(2), calculated)
 }
 
 func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
@@ -3885,7 +3885,7 @@ func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
 			},
 		},
 	})
-	assert.Equal(t, calculated, int32(3))
+	assert.Equal(t, int32(3), calculated)
 }
 
 func generateScalingRules(pods, podsPeriod, percent, percentPeriod, stabilizationWindow int32) *autoscalingv2.HPAScalingRules {

@@ -41,6 +41,7 @@ import (
 	emfake "k8s.io/metrics/pkg/client/external_metrics/fake"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)
@@ -209,11 +210,11 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
 
 func (tc *restClientTestCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestamp time.Time, err error) {
 	if tc.desiredError != nil {
-		assert.Error(t, err, "there should be an error retrieving the metrics")
+		require.Error(t, err, "there should be an error retrieving the metrics")
 		assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
 		return
 	}
-	assert.NoError(t, err, "there should be no error retrieving the metrics")
+	require.NoError(t, err, "there should be no error retrieving the metrics")
 	assert.NotNil(t, metrics, "there should be metrics returned")
 
 	if len(metrics) != len(tc.desiredMetricValues) {
@@ -230,7 +231,7 @@ func (tc *restClientTestCase) verifyResults(t *testing.T, metrics PodMetricsInfo
 	}
 
 	targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
-	assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
+	assert.True(t, targetTimestamp.Equal(timestamp), "the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp)
 }
 
 func (tc *restClientTestCase) runTest(t *testing.T) {

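The autoscaler hunks above also drop fmt.Sprintf wrappers around assertion messages. Testify's assertion helpers already accept a printf-style format string followed by its arguments, so pre-formatting the message is redundant and happens eagerly even when the assertion passes; this is the kind of call testifylint's formatter-related checks flag. A sketch with an invented check and values:

```go
package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAssertionMessages(t *testing.T) {
	reason, message := "SuccessfulRescale", "scaled up" // invented values

	// Redundant: the message is built before testify ever sees it.
	assert.NotEmpty(t, reason, fmt.Sprintf("unexpected event: %s / %s", reason, message))

	// Preferred: pass the format string and arguments straight through;
	// testify formats them only when the assertion actually fails.
	assert.NotEmpty(t, reason, "unexpected event: %s / %s", reason, message)
}
```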
@@ -21,6 +21,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 type resourceUtilizationRatioTestCase struct {
@@ -38,12 +39,12 @@ func (tc *resourceUtilizationRatioTestCase) runTest(t *testing.T) {
 	actualUtilizationRatio, actualCurrentUtilization, actualRawAverageValue, actualErr := GetResourceUtilizationRatio(tc.metrics, tc.requests, tc.targetUtilization)
 
 	if tc.expectedErr != nil {
-		assert.Error(t, actualErr, "there should be an error getting the utilization ratio")
+		require.Error(t, actualErr, "there should be an error getting the utilization ratio")
 		assert.Contains(t, fmt.Sprintf("%v", actualErr), fmt.Sprintf("%v", tc.expectedErr), "the error message should be as expected")
 		return
 	}
 
-	assert.NoError(t, actualErr, "there should be no error retrieving the utilization ratio")
+	require.NoError(t, actualErr, "there should be no error retrieving the utilization ratio")
 	assert.Equal(t, tc.expectedUtilizationRatio, actualUtilizationRatio, "the utilization ratios should be as expected")
 	assert.Equal(t, tc.expectedCurrentUtilization, actualCurrentUtilization, "the current utilization should be as expected")
 	assert.Equal(t, tc.expectedRawAverageValue, actualRawAverageValue, "the raw average value should be as expected")

@@ -355,9 +355,7 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
 	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
 		MatchLabels: map[string]string{"name": podNamePrefix},
 	})
-	if err != nil {
-		require.Nil(t, err, "something went horribly wrong...")
-	}
+	require.NoError(t, err, "something went horribly wrong...")
 
 	if tc.resource != nil {
 		outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(context.TODO(), tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector, tc.container)