cleanup: omit comparison with bool constants
Signed-off-by: tao.yang <tao.yang@daocloud.io>
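The change is mechanical throughout: a bool expression is already a valid condition, so `x == true` reduces to `x` and `x == false` to `!x` (linters such as staticcheck report this pattern as check S1002). A minimal before/after sketch, with illustrative names that are not taken from the patch:

    package main

    import "fmt"

    func main() {
    	ready := true

    	if ready == true { // verbose: compares a bool to a bool constant
    		fmt.Println("ready, verbose form")
    	}
    	if ready { // idiomatic: use the value directly
    		fmt.Println("ready, idiomatic form")
    	}
    	if !ready { // idiomatic negation replaces `ready == false`
    		fmt.Println("not ready")
    	}
    }

Dropping the constant operand never changes behavior; the simplification is purely cosmetic.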
@@ -3365,7 +3365,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList
 
 	// Only make the following checks if hostUsers is false (otherwise, the container uses the
 	// same userns as the host, and so there isn't anything to check).
-	if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers == true {
+	if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers {
 		return allErrs
 	}
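Note the ordering in the condition above: HostUsers is a *bool, and Go's || short-circuits left to right, so the dereference only runs once the nil checks have passed. The guard expressed as a standalone helper (hypothetical name, not part of the patch):

    // treatNilAsTrue reports whether b is unset or points at true,
    // mirroring the guard above; b is only dereferenced after the
    // nil check, so the expression is safe for any input.
    func treatNilAsTrue(b *bool) bool {
    	return b == nil || *b
    }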
@@ -672,7 +672,7 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku
 		switch rName {
 		case v1.ResourceCPU:
 			podCpuResources := &cm.ResourceConfig{CPUPeriod: podResources.CPUPeriod}
-			if setLimitValue == true {
+			if setLimitValue {
 				podCpuResources.CPUQuota = podResources.CPUQuota
 			} else {
 				podCpuResources.CPUShares = podResources.CPUShares
@@ -374,7 +374,7 @@ func (m *UsernsManager) GetOrCreateUserNamespaceMappings(pod *v1.Pod) (*runtimea
 	m.lock.Lock()
 	defer m.lock.Unlock()
 
-	if pod.Spec.HostUsers == nil || *pod.Spec.HostUsers == true {
+	if pod.Spec.HostUsers == nil || *pod.Spec.HostUsers {
 		return &runtimeapi.UserNamespace{
 			Mode: runtimeapi.NamespaceMode_NODE,
 		}, nil
@@ -143,10 +143,10 @@ func TestExec(t *testing.T) {
 		if status != test.expectedStatus {
 			t.Errorf("[%d] expected %v, got %v", i, test.expectedStatus, status)
 		}
-		if err != nil && test.expectError == false {
+		if err != nil && !test.expectError {
 			t.Errorf("[%d] unexpected error: %v", i, err)
 		}
-		if err == nil && test.expectError == true {
+		if err == nil && test.expectError {
 			t.Errorf("[%d] unexpected non-error", i)
 		}
 		if test.output != output {
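The two mirrored checks above can also be collapsed into a single comparison of booleans, a related idiom sometimes seen in table-driven tests (an alternative sketch, not what this patch does):

    // gotErr and test.expectError are both bools, so one inequality
    // covers the "unexpected error" and "missing error" cases at once.
    if gotErr := err != nil; gotErr != test.expectError {
    	t.Errorf("[%d] expectError=%v, got err=%v", i, test.expectError, err)
    }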
@@ -698,7 +698,7 @@ func TestRevertPorts(t *testing.T) {
 			}
 		}
 		for _, lp := range tc.existingPorts {
-			if existingPortsMap[lp].(*fakeClosable).closed == true {
+			if existingPortsMap[lp].(*fakeClosable).closed {
 				t.Errorf("Expect existing localport %v to be false in test case %v", lp, i)
 			}
 		}
@@ -11671,7 +11671,7 @@ func TestServiceRegistryResourceLocation(t *testing.T) {
 			if tc.err == false && err != nil {
 				t.Fatalf("unexpected error: %v", err)
 			}
-			if tc.err == true && err == nil {
+			if tc.err && err == nil {
 				t.Fatalf("unexpected success")
 			}
 			if !tc.err {
@@ -209,7 +209,7 @@ func TestHeap_Get(t *testing.T) {
 	}
 	// Get non-existing object.
 	_, exists, err = h.Get(mkHeapObj("non-existing", 0))
-	if err != nil || exists == true {
+	if err != nil || exists {
 		t.Fatalf("didn't expect to get any object")
 	}
 }
@@ -223,12 +223,12 @@ func TestHeap_GetByKey(t *testing.T) {
 	h.Add(mkHeapObj("baz", 11))
 
 	obj, exists, err := h.GetByKey("baz")
-	if err != nil || exists == false || obj.(testHeapObject).val != 11 {
+	if err != nil || !exists || obj.(testHeapObject).val != 11 {
 		t.Fatalf("unexpected error in getting element")
 	}
 	// Get non-existing object.
 	_, exists, err = h.GetByKey("non-existing")
-	if err != nil || exists == true {
+	if err != nil || exists {
 		t.Fatalf("didn't expect to get any object")
 	}
 }
@@ -238,7 +238,7 @@ func (a *gcPermissionsEnforcement) ownerRefToDeleteAttributeRecords(ref metav1.O
 func blockingOwnerRefs(refs []metav1.OwnerReference) []metav1.OwnerReference {
 	var ret []metav1.OwnerReference
 	for _, ref := range refs {
-		if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion == true {
+		if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion {
 			ret = append(ret, ref)
 		}
 	}
@@ -120,10 +120,10 @@ func TestConfigNormalization(t *testing.T) {
 	}
 	for _, tt := range tests {
 		err := normalizeWebhookConfig(&tt.config)
-		if err == nil && tt.wantErr == true {
+		if err == nil && tt.wantErr {
 			t.Errorf("%s: expected error from normalization and didn't have one", tt.test)
 		}
-		if err != nil && tt.wantErr == false {
+		if err != nil && !tt.wantErr {
 			t.Errorf("%s: unexpected error from normalization: %v", tt.test, err)
 		}
 		if err == nil && !reflect.DeepEqual(tt.config, tt.normalizedConfig) {
@@ -325,7 +325,7 @@ func validateCustomResourceDefinitionSpec(ctx context.Context, spec *apiextensio
 	}
 	if opts.allowDefaults && specHasDefaults(spec) {
 		opts.requireStructuralSchema = true
-		if spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields == true {
+		if spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields {
 			allErrs = append(allErrs, field.Invalid(fldPath.Child("preserveUnknownFields"), true, "must be false in order to use defaults in the schema"))
 		}
 	}
@@ -873,7 +873,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch
 	}
 	allErrs.SchemaErrors = append(allErrs.SchemaErrors, ssv.validate(schema, fldPath)...)
 
-	if schema.UniqueItems == true {
+	if schema.UniqueItems {
 		allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("uniqueItems"), "uniqueItems cannot be set to true since the runtime complexity becomes quadratic"))
 	}
 
@@ -888,7 +888,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch
 	//       restricted like additionalProperties.
 	if schema.AdditionalProperties != nil {
 		if len(schema.Properties) != 0 {
-			if schema.AdditionalProperties.Allows == false || schema.AdditionalProperties.Schema != nil {
+			if !schema.AdditionalProperties.Allows || schema.AdditionalProperties.Schema != nil {
 				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("additionalProperties"), "additionalProperties and properties are mutual exclusive"))
 			}
 		}
@@ -977,7 +977,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch
 		}
 	}
 
-	if schema.XPreserveUnknownFields != nil && *schema.XPreserveUnknownFields == false {
+	if schema.XPreserveUnknownFields != nil && !*schema.XPreserveUnknownFields {
 		allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-preserve-unknown-fields"), *schema.XPreserveUnknownFields, "must be true or undefined"))
 	}
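One subtlety in the last hunk: the unary operators ! and * bind from the right, so !*schema.XPreserveUnknownFields parses as !(*schema.XPreserveUnknownFields), dereferencing first and negating the result. The same guarded form as a standalone helper (hypothetical name, for illustration only):

    // isExplicitlyFalse reports whether b is set and points at false,
    // matching the guarded `b != nil && !*b` shape used above.
    func isExplicitlyFalse(b *bool) bool {
    	return b != nil && !*b
    }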
@@ -56,10 +56,10 @@ func TestPortRange(t *testing.T) {
 		pr := &PortRange{}
 		var f flag.Value = pr
 		err := f.Set(tc.input)
-		if err != nil && tc.success == true {
+		if err != nil && tc.success {
 			t.Errorf("expected success, got %q", err)
 			continue
-		} else if err == nil && tc.success == false {
+		} else if err == nil && !tc.success {
 			t.Errorf("expected failure %#v", testCases[i])
 			continue
 		} else if tc.success {
@@ -541,9 +541,9 @@ func TestValidateEgressSelectorConfiguration(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
 			errs := ValidateEgressSelectorConfiguration(tc.contents)
-			if tc.expectError == false && len(errs) != 0 {
+			if !tc.expectError && len(errs) != 0 {
 				t.Errorf("Calling ValidateEgressSelectorConfiguration expected no error, got %v", errs)
-			} else if tc.expectError == true && len(errs) == 0 {
+			} else if tc.expectError && len(errs) == 0 {
 				t.Errorf("Calling ValidateEgressSelectorConfiguration expected error, got no error")
 			}
 		})
@@ -67,9 +67,9 @@ func TestValidateTracingOptions(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
 			errs := tc.contents.Validate()
-			if tc.expectError == false && len(errs) != 0 {
+			if !tc.expectError && len(errs) != 0 {
 				t.Errorf("Calling Validate expected no error, got %v", errs)
-			} else if tc.expectError == true && len(errs) == 0 {
+			} else if tc.expectError && len(errs) == 0 {
 				t.Errorf("Calling Validate expected error, got no error")
 			}
 		})
@@ -485,7 +485,7 @@ func TestCacheWatcherDrainingNoBookmarkAfterResourceVersionReceived(t *testing.T
 	forget := func(drainWatcher bool) {
 		lock.Lock()
 		defer lock.Unlock()
-		if drainWatcher == true {
+		if drainWatcher {
 			t.Fatalf("didn't expect drainWatcher to be set to true")
 		}
 		count++
@@ -1252,7 +1252,7 @@ func (c *Cacher) LastSyncResourceVersion() (uint64, error) {
 //
 // The returned function must be called under the watchCache lock.
 func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, parsedResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) {
-	if opts.SendInitialEvents == nil || *opts.SendInitialEvents == false || !opts.Predicate.AllowWatchBookmarks {
+	if opts.SendInitialEvents == nil || !*opts.SendInitialEvents || !opts.Predicate.AllowWatchBookmarks {
 		return func() uint64 { return 0 }, nil
 	}
 	return c.getCommonResourceVersionLockedFunc(ctx, parsedResourceVersion, opts)
@@ -1267,7 +1267,7 @@ func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context,
 //
 // The returned function must be called under the watchCache lock.
 func (c *Cacher) getStartResourceVersionForWatchLockedFunc(ctx context.Context, parsedWatchResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) {
-	if opts.SendInitialEvents == nil || *opts.SendInitialEvents == true {
+	if opts.SendInitialEvents == nil || *opts.SendInitialEvents {
 		return func() uint64 { return parsedWatchResourceVersion }, nil
 	}
 	return c.getCommonResourceVersionLockedFunc(ctx, parsedWatchResourceVersion, opts)
@@ -1298,7 +1298,7 @@ func (c *Cacher) getCommonResourceVersionLockedFunc(ctx context.Context, parsedW
 // Additionally, it instructs the caller whether it should ask for
 // all events from the cache (full state) or not.
 func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, requestedWatchRV uint64, opts storage.ListOptions) (bool, error) {
-	if opts.SendInitialEvents != nil && *opts.SendInitialEvents == true {
+	if opts.SendInitialEvents != nil && *opts.SendInitialEvents {
 		err := c.watchCache.waitUntilFreshAndBlock(ctx, requestedWatchRV)
 		defer c.watchCache.RUnlock()
 		return err == nil, err
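Worth noting how these three hunks treat the three states of the *bool SendInitialEvents: the bookmark path and the wait path treat unset (nil) like false, while the start-resource-version path treats unset like true. The two defaulting rules as hypothetical helpers (names are illustrative, not from the patch):

    // nil is treated like false: proceed only when explicitly enabled.
    func initialEventsOrFalse(b *bool) bool { return b != nil && *b }

    // nil is treated like true: the default applies unless the caller
    // explicitly set the option to false.
    func initialEventsOrTrue(b *bool) bool { return b == nil || *b }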
@@ -56,7 +56,7 @@ func TestCRDFinderErrors(t *testing.T) {
 	}
 	finder := NewCRDFinder(getter)
 	found, err := finder.HasCRD(schema.GroupKind{Group: "", Kind: "Pod"})
-	if found == true {
+	if found {
 		t.Fatalf("Found the CRD with non-working getter function")
 	}
 	if err == nil {
@@ -264,7 +264,7 @@ func TestHeap_Get(t *testing.T) {
 	}
 	// Get non-existing object.
 	_, exists, err = h.Get(mkHeapObj("non-existing", 0))
-	if err != nil || exists == true {
+	if err != nil || exists {
 		t.Fatalf("didn't expect to get any object")
 	}
 }
@@ -283,7 +283,7 @@ func TestHeap_GetByKey(t *testing.T) {
 	}
 	// Get non-existing object.
 	_, exists, err = h.GetByKey("non-existing")
-	if err != nil || exists == true {
+	if err != nil || exists {
 		t.Fatalf("didn't expect to get any object")
 	}
 }
@@ -784,7 +784,7 @@ func TestSelectZoneForVolume(t *testing.T) {
 					t.Errorf("Unexpected error from SelectZoneForVolume for %s; Error: %v", test.Name, err)
 				}
 
-				if test.ExpectSpecificZone == true {
+				if test.ExpectSpecificZone {
 					if zone != test.ExpectedZone {
 						t.Errorf("Expected zone %v does not match obtained zone %v for %s", test.ExpectedZone, zone, test.Name)
 					}
@@ -77,7 +77,7 @@ func genStatus(t *types.Type) bool {
 // hasObjectMeta returns true if the type has a ObjectMeta field.
 func hasObjectMeta(t *types.Type) bool {
 	for _, m := range t.Members {
-		if m.Embedded == true && m.Name == "ObjectMeta" {
+		if m.Embedded && m.Name == "ObjectMeta" {
 			return true
 		}
 	}
@@ -93,7 +93,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
 		for _, t := range pkg.Types {
 			klog.V(5).Infof("considering type = %s", t.Name.String())
 			for _, typeMember := range t.Members {
-				if typeMember.Name == "TypeMeta" && typeMember.Embedded == true {
+				if typeMember.Name == "TypeMeta" && typeMember.Embedded {
 					typesToRegister = append(typesToRegister, t)
 				}
 			}
@@ -97,9 +97,9 @@ func TestValidateTracingConfiguration(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
 			errs := ValidateTracingConfiguration(tc.contents, nil, field.NewPath("tracing"))
-			if tc.expectError == false && len(errs) != 0 {
+			if !tc.expectError && len(errs) != 0 {
 				t.Errorf("Calling ValidateTracingConfiguration expected no error, got %v", errs)
-			} else if tc.expectError == true && len(errs) == 0 {
+			} else if tc.expectError && len(errs) == 0 {
 				t.Errorf("Calling ValidateTracingConfiguration expected error, got no error")
 			}
 		})
@@ -254,10 +254,10 @@ func TestGetBool(t *testing.T) {
 	for _, tt := range testCases {
 		t.Run(tt.name, func(t *testing.T) {
 			got, err := GetBool(tt.parameters, tt.key, tt.defaultValue)
-			if err != nil && tt.expectError == false {
+			if err != nil && !tt.expectError {
 				t.Errorf("%s: unexpected error: %v", tt.name, err)
 			}
-			if err == nil && tt.expectError == true {
+			if err == nil && tt.expectError {
 				t.Errorf("%s: expect error, got nil", tt.name)
 			}
 			if got != tt.expected {
@@ -2091,7 +2091,7 @@ func deduplicate(collection *[]string) *[]string {
 	result := make([]string, 0, len(*collection))
 
 	for _, v := range *collection {
-		if seen[v] == true {
+		if seen[v] {
 			// skip this element
 		} else {
 			seen[v] = true
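The comparison-free form works here because indexing a map with a missing key yields the value type's zero value, which is false for bool. The empty "skip this element" branch could also be inverted, a possible further cleanup (sketch only, not part of this patch):

    for _, v := range *collection {
    	if !seen[v] { // missing keys read as false, so no ok-check is needed
    		seen[v] = true
    		result = append(result, v)
    	}
    }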
@@ -904,7 +904,7 @@ func (ss *scaleSet) getPrimaryNetworkInterfaceConfiguration(networkConfiguration
 
 	for idx := range networkConfigurations {
 		networkConfig := &networkConfigurations[idx]
-		if networkConfig.Primary != nil && *networkConfig.Primary == true {
+		if networkConfig.Primary != nil && *networkConfig.Primary {
 			return networkConfig, nil
 		}
 	}
@@ -920,7 +920,7 @@ func (ss *scaleSet) getPrimaryNetworkInterfaceConfigurationForScaleSet(networkCo
 
 	for idx := range networkConfigurations {
 		networkConfig := &networkConfigurations[idx]
-		if networkConfig.Primary != nil && *networkConfig.Primary == true {
+		if networkConfig.Primary != nil && *networkConfig.Primary {
 			return networkConfig, nil
 		}
 	}
@@ -936,7 +936,7 @@ func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScale
 
 	for idx := range ipConfigurations {
 		ipConfig := &ipConfigurations[idx]
-		if ipConfig.Primary != nil && *ipConfig.Primary == true {
+		if ipConfig.Primary != nil && *ipConfig.Primary {
 			return ipConfig, nil
 		}
 	}
@@ -134,7 +134,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
 		for vc, vsi := range nm.vsphereInstanceMap {
 
 			found := getVMFound()
-			if found == true {
+			if found {
 				break
 			}
 
@@ -175,7 +175,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error {
 
 			for _, datacenterObj := range datacenterObjs {
 				found := getVMFound()
-				if found == true {
+				if found {
 					break
 				}
@@ -726,7 +726,7 @@ func (v *podStartVerifier) Verify(event watch.Event) error {
 	}
 
 	if status := e2epod.FindContainerStatusInPod(pod, "blocked"); status != nil {
-		if (status.Started != nil && *status.Started == true) || status.LastTerminationState.Terminated != nil || status.State.Waiting == nil {
+		if (status.Started != nil && *status.Started) || status.LastTerminationState.Terminated != nil || status.State.Waiting == nil {
 			return fmt.Errorf("pod %s on node %s should not have started the blocked container: %#v", pod.Name, pod.Spec.NodeName, status)
 		}
 	}
@@ -196,15 +196,15 @@ func verifyDiskFormat(ctx context.Context, client clientset.Interface, nodeName
 	}
 	isDiskFormatCorrect := false
 	if diskFormat == "eagerzeroedthick" {
-		if eagerlyScrub == true && thinProvisioned == false {
+		if eagerlyScrub && !thinProvisioned {
 			isDiskFormatCorrect = true
 		}
 	} else if diskFormat == "zeroedthick" {
-		if eagerlyScrub == false && thinProvisioned == false {
+		if !eagerlyScrub && !thinProvisioned {
 			isDiskFormatCorrect = true
 		}
 	} else if diskFormat == "thin" {
-		if eagerlyScrub == false && thinProvisioned == true {
+		if !eagerlyScrub && thinProvisioned {
 			isDiskFormatCorrect = true
 		}
 	}
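The three branches above pair each disk format with exactly one expected (eagerlyScrub, thinProvisioned) combination, so the same decision could be written as a lookup table; a compact alternative sketch (not the change this patch makes):

    // expected maps a disk format to its required
    // (eagerlyScrub, thinProvisioned) flag pair.
    expected := map[string][2]bool{
    	"eagerzeroedthick": {true, false},
    	"zeroedthick":      {false, false},
    	"thin":             {false, true},
    }
    want, ok := expected[diskFormat]
    isDiskFormatCorrect := ok && eagerlyScrub == want[0] && thinProvisioned == want[1]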
@@ -432,9 +432,9 @@ func TestMatchConditions(t *testing.T) {
 
 			for _, pod := range testcase.pods {
 				_, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, dryRunCreate)
-				if testcase.expectErrorPod == false && err != nil {
+				if !testcase.expectErrorPod && err != nil {
 					t.Fatalf("unexpected error creating test pod: %v", err)
-				} else if testcase.expectErrorPod == true && err == nil {
+				} else if testcase.expectErrorPod && err == nil {
 					t.Fatal("expected error creating pods")
 				}
 			}
@@ -430,9 +430,9 @@ func TestEvictionWithFinalizers(t *testing.T) {
 				t.Fatalf("Failed to get the pod %q with error: %q", klog.KObj(pod), e)
 			}
 			_, cond := podutil.GetPodCondition(&updatedPod.Status, v1.PodConditionType(v1.DisruptionTarget))
-			if tc.wantDisruptionTargetCond == true && cond == nil {
+			if tc.wantDisruptionTargetCond && cond == nil {
 				t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget)
-			} else if tc.wantDisruptionTargetCond == false && cond != nil {
+			} else if !tc.wantDisruptionTargetCond && cond != nil {
 				t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget)
 			}
 		})
@@ -163,9 +163,9 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
 				t.Fatalf("Test Failed: error: %q, while getting updated pod", err)
 			}
 			_, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget)
-			if test.enablePodDisruptionConditions == true && cond == nil {
+			if test.enablePodDisruptionConditions && cond == nil {
 				t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
-			} else if test.enablePodDisruptionConditions == false && cond != nil {
+			} else if !test.enablePodDisruptionConditions && cond != nil {
 				t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
 			}
 		})
@@ -513,7 +513,7 @@ func TestPreemption(t *testing.T) {
 						t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name)
 					}
 					_, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
-					if test.enablePodDisruptionConditions == true && cond == nil {
+					if test.enablePodDisruptionConditions && cond == nil {
 						t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
 					} else if test.enablePodDisruptionConditions == false && cond != nil {
 						t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
@@ -102,7 +102,7 @@ func TestUnschedulableNodes(t *testing.T) {
 					// Nodes that are unschedulable or that are not ready or
 					// have their disk full (Node.Spec.Conditions) are excluded
 					// based on NodeConditionPredicate, a separate check
-					return node != nil && node.(*v1.Node).Spec.Unschedulable == true
+					return node != nil && node.(*v1.Node).Spec.Unschedulable
 				})
 				if err != nil {
 					t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
@@ -79,9 +79,9 @@ func runTestAPICiphers(t *testing.T, testID int, kubePort int, clientCiphers []u
 		defer resp.Body.Close()
 	}
 
-	if expectedError == true && err == nil {
+	if expectedError && err == nil {
 		t.Fatalf("%d: expecting error for cipher test, client cipher is supported and it should't", testID)
-	} else if err != nil && expectedError == false {
+	} else if err != nil && !expectedError {
 		t.Fatalf("%d: not expecting error by client with cipher failed: %+v", testID, err)
 	}
 }