Merge pull request #129519 from kishen-v/automated-cherry-pick-of-#127422-upstream-release-1.31
Automated cherry pick of #127422: Fix Go vet errors for master golang
commit d7fc7e30cb
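The hunks below all target the same class of `go vet` diagnostic raised by newer Go toolchains: a non-constant string passed as the format argument of a printf-style function (`fmt.Errorf`, `t.Errorf`, `t.Logf`, `klog.Infof`, event/condition helpers, and so on). The fixes either switch to the non-formatting variant (`errors.New`, `t.Error`, `t.Log`, `klog.Info`) or pin the format to a constant such as "%s". A minimal, self-contained sketch of the problem and both fix patterns follows; the messages are hypothetical and this is not code from the patch itself:

package main

import (
    "errors"
    "fmt"
)

func main() {
    // A message that happens to contain a '%' character.
    msg := "disk is 100% full"

    // Flagged by `go vet` (non-constant format string): msg is interpreted
    // as a format string, so the '%' starts a verb with no operand and the
    // output comes out mangled, e.g. "disk is 100%!f(MISSING)ull".
    badErr := fmt.Errorf(msg)

    // Fix 1: no formatting is needed, so build the error directly.
    goodErr1 := errors.New(msg)

    // Fix 2: keep the printf-style call but make the format string constant.
    goodErr2 := fmt.Errorf("%s", msg)

    fmt.Println(badErr)
    fmt.Println(goodErr1)
    fmt.Println(goodErr2)
}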
@@ -19,7 +19,6 @@ package renewal
 import (
     "crypto"
     "crypto/x509"
-    "fmt"
     "net"
     "os"
     "path/filepath"
@@ -265,7 +264,7 @@ func TestPKICertificateReadWriterExists(t *testing.T) {
         }
     }()
     filename := "testfile"
-    tmpfilepath := filepath.Join(tmpdir, fmt.Sprintf(filename+".crt"))
+    tmpfilepath := filepath.Join(tmpdir, filename+".crt")
     err = os.WriteFile(tmpfilepath, nil, 0644)
     if err != nil {
         t.Fatalf("Couldn't write file: %v", err)
@@ -185,7 +185,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
         defaults := detectDefaults(t, rc, reflect.ValueOf(template))
         if !reflect.DeepEqual(expectedDefaults, defaults) {
             t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
-            t.Logf(cmp.Diff(expectedDefaults, defaults))
+            t.Log(cmp.Diff(expectedDefaults, defaults))
         }
     })
     t.Run("hostnet PodTemplateSpec with ports", func(t *testing.T) {
@@ -223,7 +223,7 @@ func testWorkloadDefaults(t *testing.T, featuresEnabled bool) {
         }()
         if !reflect.DeepEqual(expected, defaults) {
             t.Errorf("Defaults for PodTemplateSpec changed. This can cause spurious rollouts of workloads on API server upgrade.")
-            t.Logf(cmp.Diff(expected, defaults))
+            t.Log(cmp.Diff(expected, defaults))
         }
     })
 }
@@ -374,7 +374,7 @@ func testPodDefaults(t *testing.T, featuresEnabled bool) {
     defaults := detectDefaults(t, pod, reflect.ValueOf(pod))
     if !reflect.DeepEqual(expectedDefaults, defaults) {
         t.Errorf("Defaults for PodSpec changed. This can cause spurious restarts of containers on API server upgrade.")
-        t.Logf(cmp.Diff(expectedDefaults, defaults))
+        t.Log(cmp.Diff(expectedDefaults, defaults))
     }
 }
@@ -2782,7 +2782,7 @@ func waitForChanReceive(t *testing.T, timeout time.Duration, receivingChan chan
     timer := time.NewTimer(timeout)
     select {
     case <-timer.C:
-        t.Errorf(errorMsg)
+        t.Error(errorMsg)
     case <-receivingChan:
     }
 }
@@ -2450,7 +2450,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     node2.Status = healthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node2, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2479,7 +2479,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     node3.Status = unhealthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2492,7 +2492,7 @@ func TestApplyNoExecuteTaints(t *testing.T) {
     node3.Status.Conditions = overrideNodeNewStatusConditions
     _, err = fakeNodeHandler.UpdateStatus(ctx, node3, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
@@ -2638,7 +2638,7 @@ func TestApplyNoExecuteTaintsToNodesEnqueueTwice(t *testing.T) {
     node0.Status = healthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }

@@ -2870,12 +2870,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
     node1.Status = healthyNodeNewStatus
     _, err = fakeNodeHandler.UpdateStatus(ctx, node0, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
     _, err = fakeNodeHandler.UpdateStatus(ctx, node1, metav1.UpdateOptions{})
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
         return
     }
@@ -338,7 +338,7 @@ func (a *HorizontalController) computeReplicasForMetrics(ctx context.Context, hp
     // return an error and set the condition of the hpa based on the first invalid metric.
     // Otherwise set the condition as scaling active as we're going to scale
     if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
-        setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
+        setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, "%s", invalidMetricCondition.Message)
         return -1, "", statuses, time.Time{}, invalidMetricError
     }
     setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
@@ -385,15 +385,15 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
         errMsg := "selector is required"
         a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
         setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
-        return nil, fmt.Errorf(errMsg)
+        return nil, errors.New(errMsg)
     }

     parsedSelector, err := labels.Parse(selector)
     if err != nil {
         errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
         a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
-        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
-        return nil, fmt.Errorf(errMsg)
+        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "%s", errMsg)
+        return nil, errors.New(errMsg)
     }

     hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
@@ -413,8 +413,8 @@ func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.Horiz
     if len(selectingHpas) > 1 {
         errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
         a.eventRecorder.Event(hpa, v1.EventTypeWarning, "AmbiguousSelector", errMsg)
-        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", errMsg)
-        return nil, fmt.Errorf(errMsg)
+        setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", "%s", errMsg)
+        return nil, errors.New(errMsg)
     }

     return parsedSelector, nil
@@ -570,7 +570,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, status
         return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
     }
     errMsg := "invalid object metric source: neither a value target nor an average value target was set"
-    err = fmt.Errorf(errMsg)
+    err = errors.New(errMsg)
     condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
     return 0, time.Time{}, "", condition, err
 }
@@ -617,7 +617,7 @@ func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context

     if target.AverageUtilization == nil {
         errMsg := "invalid resource metric source: neither an average utilization target nor an average value (usage) target was set"
-        return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
+        return 0, nil, time.Time{}, "", condition, errors.New(errMsg)
     }

     targetUtilization := *target.AverageUtilization
@@ -719,9 +719,9 @@ func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, stat
         return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
     }
     errMsg := "invalid external metric source: neither a value target nor an average value target was set"
-    err = fmt.Errorf(errMsg)
+    err = errors.New(errMsg)
     condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
-    return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
+    return 0, time.Time{}, "", condition, errors.New(errMsg)
 }

 func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
@@ -950,12 +950,12 @@ func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.Horiz
         setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
     }

-    desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
+    desiredReplicas, reason, message := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)

     if desiredReplicas == stabilizedRecommendation {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
     } else {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
     }

     return desiredReplicas
@@ -991,15 +991,15 @@ func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autosc
     normalizationArg.DesiredReplicas = stabilizedRecommendation
     if stabilizedRecommendation != prenormalizedDesiredReplicas {
         // "ScaleUpStabilized" || "ScaleDownStabilized"
-        setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message)
+        setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, "%s", message)
     } else {
         setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
     }
     desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg)
     if desiredReplicas == stabilizedRecommendation {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, "%s", message)
     } else {
-        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message)
+        setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, "%s", message)
     }

     return desiredReplicas
@@ -82,7 +82,7 @@ func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCon
     resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
     copy(resv2, statusOk)
     for _, override := range overrides {
-        resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
+        resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, "%s", override.Message)
     }

     // copy to a v1 slice
@@ -824,35 +824,35 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
         logger, ctx := ktesting.NewTestContext(t)
         ssc, spc, om, _ := newFakeStatefulSetController(ctx, set)
         if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-            t.Errorf(onPolicy("Failed to turn up StatefulSet : %s", err))
+            t.Error(onPolicy("Failed to turn up StatefulSet : %s", err))
         }
         var err error
         if set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name); err != nil {
-            t.Errorf(onPolicy("Could not get scaled up set: %v", err))
+            t.Error(onPolicy("Could not get scaled up set: %v", err))
         }
         if set.Status.Replicas != 3 {
-            t.Errorf(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
+            t.Error(onPolicy("set.Status.Replicas = %v; want 3", set.Status.Replicas))
         }
         *set.Spec.Replicas = 2
         if err := scaleDownStatefulSetController(logger, set, ssc, spc, om); err != nil {
-            t.Errorf(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
+            t.Error(onPolicy("Failed to scale down StatefulSet : msg, %s", err))
         }
         set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
         if err != nil {
-            t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+            t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
         }
         if set.Status.Replicas != 2 {
-            t.Errorf(onPolicy("Failed to scale statefulset to 2 replicas"))
+            t.Error(onPolicy("Failed to scale statefulset to 2 replicas"))
         }

         var claim *v1.PersistentVolumeClaim
         claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
         if err != nil {
-            t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+            t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
         }
         refs := claim.GetOwnerReferences()
         if len(refs) != 1 {
-            t.Errorf(onPolicy("Expected only one refs: %v", refs))
+            t.Error(onPolicy("Expected only one refs: %v", refs))
         }
         // Make the pod ref stale.
         for i := range refs {
@@ -863,29 +863,29 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
         }
         claim.SetOwnerReferences(refs)
         if err = om.claimsIndexer.Update(claim); err != nil {
-            t.Errorf(onPolicy("Could not update claim with new owner ref: %v", err))
+            t.Error(onPolicy("Could not update claim with new owner ref: %v", err))
         }

         *set.Spec.Replicas = 3
         // Until the stale PVC goes away, the scale up should never finish. Run 10 iterations, then delete the PVC.
         if err := scaleUpStatefulSetControllerBounded(logger, set, ssc, spc, om, 10); err != nil {
-            t.Errorf(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
+            t.Error(onPolicy("Failed attempt to scale StatefulSet back up: %v", err))
         }
         set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
         if err != nil {
-            t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+            t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
         }
         if set.Status.Replicas != 2 {
-            t.Errorf(onPolicy("Expected set to stay at two replicas"))
+            t.Error(onPolicy("Expected set to stay at two replicas"))
         }

         claim, err = om.claimsLister.PersistentVolumeClaims(set.Namespace).Get("datadir-foo-2")
         if err != nil {
-            t.Errorf(onPolicy("Could not find expected pvc datadir-foo-2"))
+            t.Error(onPolicy("Could not find expected pvc datadir-foo-2"))
         }
         refs = claim.GetOwnerReferences()
         if len(refs) != 1 {
-            t.Errorf(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
+            t.Error(onPolicy("Unexpected change to condemned pvc ownerRefs: %v", refs))
         }
         foundPodRef := false
         for i := range refs {
@@ -895,21 +895,21 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
             }
         }
         if !foundPodRef {
-            t.Errorf(onPolicy("Claim ref unexpectedly changed: %v", refs))
+            t.Error(onPolicy("Claim ref unexpectedly changed: %v", refs))
         }
         if err = om.claimsIndexer.Delete(claim); err != nil {
-            t.Errorf(onPolicy("Could not delete stale pvc: %v", err))
+            t.Error(onPolicy("Could not delete stale pvc: %v", err))
         }

         if err := scaleUpStatefulSetController(logger, set, ssc, spc, om); err != nil {
-            t.Errorf(onPolicy("Failed to scale StatefulSet back up: %v", err))
+            t.Error(onPolicy("Failed to scale StatefulSet back up: %v", err))
         }
         set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
         if err != nil {
-            t.Errorf(onPolicy("Could not get scaled down StatefulSet: %v", err))
+            t.Error(onPolicy("Could not get scaled down StatefulSet: %v", err))
         }
         if set.Status.Replicas != 3 {
-            t.Errorf(onPolicy("Failed to scale set back up once PVC was deleted"))
+            t.Error(onPolicy("Failed to scale set back up once PVC was deleted"))
         }
     }
 }
@@ -235,7 +235,7 @@ func TestAssociations(t *testing.T) {
             // Run consistency check after every operation.
             err := consistencyCheck(multimap)
             if err != nil {
-                t.Fatalf(err.Error())
+                t.Fatal(err.Error())
             }
         }
         for _, expect := range tc.want {
@@ -261,7 +261,7 @@ func TestEfficientAssociation(t *testing.T) {

     err := forwardSelect(key("hpa-1"), key("pod-1"), key("pod-2"))(m)
     if err != nil {
-        t.Errorf(err.Error())
+        t.Error(err.Error())
     }
 }
@@ -22,6 +22,7 @@ reference them.
 package cache

 import (
+    "errors"
     "fmt"
     "sync"
     "time"
@@ -524,7 +525,7 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN
         // should not happen
         errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t, because nodeName=%q does not exist",
             needed, nodeName)
-        return fmt.Errorf(errMsg)
+        return errors.New(errMsg)
     }

     nodeToUpdate.statusUpdateNeeded = needed
@@ -18,6 +18,7 @@ package expand

 import (
     "context"
+    "errors"
     "fmt"
     "net"
     "time"
@@ -28,7 +29,7 @@ import (

     authenticationv1 "k8s.io/api/authentication/v1"
     v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/runtime"
@@ -205,7 +206,7 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
         return err
     }
     pvc, err := expc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
-    if errors.IsNotFound(err) {
+    if apierrors.IsNotFound(err) {
         return nil
     }
     logger := klog.FromContext(ctx)
@@ -256,14 +257,14 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
         if err != nil {
             errorMsg := fmt.Sprintf("error getting CSI driver name for pvc %s, with error %v", key, err)
             expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
-            return fmt.Errorf(errorMsg)
+            return errors.New(errorMsg)
         }

         pvc, err := util.SetClaimResizer(pvc, csiResizerName, expc.kubeClient)
         if err != nil {
             errorMsg := fmt.Sprintf("error setting resizer annotation to pvc %s, with error %v", key, err)
             expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)
-            return fmt.Errorf(errorMsg)
+            return errors.New(errorMsg)
         }
         return nil
     }
@@ -18,6 +18,7 @@ package persistentvolume

 import (
     "context"
+    "errors"
     "fmt"
     "reflect"
     "strings"
@@ -1630,7 +1631,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(
         strerr := fmt.Sprintf("plugin %q is not a CSI plugin. Only CSI plugin can provision a claim with a datasource", pluginName)
         logger.V(2).Info(strerr)
         ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
-        return pluginName, fmt.Errorf(strerr)
+        return pluginName, errors.New(strerr)

     }
     provisionerName := storageClass.Provisioner
@@ -176,7 +176,7 @@ func LoadAndValidateData(data []byte, requireNonWebhookTypes sets.Set[authzconfi
         sets.NewString(modes.AuthorizationModeChoices...),
         sets.NewString(repeatableAuthorizerTypes...),
     ); len(errors) != 0 {
-        return nil, fmt.Errorf(errors.ToAggregate().Error())
+        return nil, errors.ToAggregate()
     }

     // test to check if the authorizer names passed conform to the authorizers for type!=Webhook
|
|||||||
package cm
|
package cm
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -112,7 +113,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
|
|||||||
if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
|
if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
|
||||||
message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
|
message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
|
||||||
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
||||||
return fmt.Errorf(message)
|
return errors.New(message)
|
||||||
}
|
}
|
||||||
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
|
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
|
||||||
}
|
}
|
||||||
@ -121,7 +122,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
|
|||||||
if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
|
if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
|
||||||
message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
|
message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
|
||||||
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
|
||||||
return fmt.Errorf(message)
|
return errors.New(message)
|
||||||
}
|
}
|
||||||
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
|
cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
|
||||||
}
|
}
|
||||||
|
@@ -274,7 +274,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
         m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", s.Message())
         return s.Message(), ErrPreStartHook
     }
-    m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name))
+    m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container: %v", container.Name)

     // Step 3: start the container.
     err = m.runtimeService.StartContainer(ctx, containerID)
@@ -283,7 +283,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb
         m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message())
         return s.Message(), kubecontainer.ErrRunContainer
     }
-    m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, fmt.Sprintf("Started container %s", container.Name))
+    m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.StartedContainer, "Started container %v", container.Name)

     // Symlink container logs to the legacy container log location for cluster logging
     // support.
@@ -780,7 +780,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P
     if len(message) == 0 {
         message = fmt.Sprintf("Stopping container %s", containerSpec.Name)
     }
-    m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, message)
+    m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeNormal, events.KillingContainer, "%v", message)

     if gracePeriodOverride != nil {
         gracePeriod = *gracePeriodOverride
@@ -17,7 +17,6 @@ limitations under the License.
 package nodeshutdown

 import (
-    "fmt"
     "os"
     "path/filepath"
     "testing"
@@ -60,7 +59,7 @@ func TestLocalStorage(t *testing.T) {
         return
     }
     nowStr := now.Format(time.RFC3339Nano)
-    wantRaw := fmt.Sprintf(`{"startTime":"` + nowStr + `","endTime":"` + nowStr + `"}`)
+    wantRaw := `{"startTime":"` + nowStr + `","endTime":"` + nowStr + `"}`
     if string(raw) != wantRaw {
         t.Errorf("got %s, want %s", string(raw), wantRaw)
         return
@@ -21,6 +21,7 @@ keep track of registered plugins.
 package cache

 import (
+    "errors"
     "fmt"
     "sync"
     "time"
@@ -100,7 +101,7 @@ func (plugin *PluginInfo) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, d
 // that can be used in logs.
 // The msg format follows the pattern "<prefixMsg> <plugin details>: <err> ",
 func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-    return fmt.Errorf(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+    return errors.New(plugin.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }

 // GenerateError returns simple and detailed errors for plugins to register
@@ -108,7 +109,7 @@ func (plugin *PluginInfo) GenerateErrorDetailed(prefixMsg string, err error) (de
 // The msg format follows the pattern "<prefixMsg> <plugin details>: <err> ".
 func (plugin *PluginInfo) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
     simpleMsg, detailedMsg := plugin.GenerateMsg(prefixMsg, errSuffix(err))
-    return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+    return errors.New(simpleMsg), errors.New(detailedMsg)
 }

 // Generates an error string with the format ": <err>" if err exists
@@ -80,7 +80,7 @@ func TestTCPPortExhaustion(t *testing.T) {
         {"HTTP", true},
     }
     for _, tt := range tests {
-        t.Run(fmt.Sprintf(tt.name), func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             testRootDir := ""
             if tempDir, err := os.MkdirTemp("", "kubelet_test."); err != nil {
                 t.Fatalf("can't make a temp rootdir: %v", err)
@@ -19,6 +19,7 @@ package scheduler
 import (
     "bytes"
     "encoding/json"
+    "errors"
     "fmt"
     "net/http"
     "strings"
@@ -287,7 +288,7 @@ func (h *HTTPExtender) Filter(
         return nil, nil, nil, err
     }
     if result.Error != "" {
-        return nil, nil, nil, fmt.Errorf(result.Error)
+        return nil, nil, nil, errors.New(result.Error)
     }

     if h.nodeCacheCapable && result.NodeNames != nil {
@@ -373,7 +374,7 @@ func (h *HTTPExtender) Bind(binding *v1.Binding) error {
         return err
     }
     if result.Error != "" {
-        return fmt.Errorf(result.Error)
+        return errors.New(result.Error)
     }
     return nil
 }
pkg/scheduler/internal/cache/cache.go (vendored)
@@ -18,6 +18,7 @@ package cache

 import (
     "context"
+    "errors"
     "fmt"
     "sync"
     "time"
@@ -272,7 +273,7 @@ func (cache *cacheImpl) UpdateSnapshot(logger klog.Logger, nodeSnapshot *Snapsho
         // We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
         // error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
         cache.updateNodeInfoSnapshotList(logger, nodeSnapshot, true)
-        return fmt.Errorf(errMsg)
+        return errors.New(errMsg)
     }

     return nil
@@ -3766,7 +3766,7 @@ func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volu
 // end with a period.
 func makePredicateError(failReason string) error {
     s := fmt.Sprintf("0/1 nodes are available: %v.", failReason)
-    return fmt.Errorf(s)
+    return errors.New(s)
 }

 func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
@@ -269,7 +269,7 @@ func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
 }

 func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, deviceMounterArgs volume.DeviceMounterArgs) error {
-    klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))
+    klog.V(4).Info(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath))

     if deviceMountPath == "" {
         return errors.New(log("attacher.MountDevice failed, deviceMountPath is empty"))
@@ -363,7 +363,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
         // finished, we should remove the directory.
         if err != nil && volumetypes.IsOperationFinishedError(err) {
             // clean up metadata
-            klog.Errorf(log("attacher.MountDevice failed: %v", err))
+            klog.Error(log("attacher.MountDevice failed: %v", err))
             if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
                 klog.Error(log("attacher.MountDevice failed to remove mount dir after error [%s]: %v", deviceMountPath, err))
             }
@@ -377,7 +377,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
     }

     if !stageUnstageSet {
-        klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
+        klog.Info(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
         // defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there.
         return nil
     }
@@ -415,7 +415,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
         return err
     }

-    klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
+    klog.V(4).Info(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath))
     return err
 }

@@ -604,7 +604,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
             return nil
         }

-        klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
+        klog.Error(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err))
         return err
     }

@@ -627,7 +627,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
         return errors.New(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err))
     }
     if !stageUnstageSet {
-        klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
+        klog.Info(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice..."))
         // Just delete the global directory + json file
         if err := removeMountDir(c.plugin, deviceMountPath); err != nil {
             return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
@@ -650,7 +650,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error {
         return errors.New(log("failed to clean up global mount %s: %s", dataDir, err))
     }

-    klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
+    klog.V(4).Info(log("attacher.UnmountDevice successfully requested NodeUnStageVolume [%s]", deviceMountPath))
     return nil
 }
@@ -105,7 +105,7 @@ var _ volume.CustomBlockVolumeMapper = &csiBlockMapper{}
 // Example: plugins/kubernetes.io/csi/volumeDevices/{specName}/dev
 func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) {
     dir := getVolumeDevicePluginDir(m.specName, m.plugin.host)
-    klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir))
+    klog.V(4).Info(log("blockMapper.GetGlobalMapPath = %s", dir))
     return dir, nil
 }

@@ -137,7 +137,7 @@ func (m *csiBlockMapper) getPublishPath() string {
 // returns: pods/{podUID}/volumeDevices/kubernetes.io~csi, {specName}
 func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) {
     path := m.plugin.host.GetPodVolumeDeviceDir(m.podUID, utilstrings.EscapeQualifiedName(CSIPluginName))
-    klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
+    klog.V(4).Info(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, m.specName))
     return path, m.specName
 }

@@ -149,10 +149,10 @@ func (m *csiBlockMapper) stageVolumeForBlock(
     csiSource *v1.CSIPersistentVolumeSource,
     attachment *storage.VolumeAttachment,
 ) (string, error) {
-    klog.V(4).Infof(log("blockMapper.stageVolumeForBlock called"))
+    klog.V(4).Info(log("blockMapper.stageVolumeForBlock called"))

     stagingPath := m.GetStagingPath()
-    klog.V(4).Infof(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))
+    klog.V(4).Info(log("blockMapper.stageVolumeForBlock stagingPath set [%s]", stagingPath))

     // Check whether "STAGE_UNSTAGE_VOLUME" is set
     stageUnstageSet, err := csi.NodeSupportsStageUnstage(ctx)
@@ -160,7 +160,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
         return "", errors.New(log("blockMapper.stageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
     }
     if !stageUnstageSet {
-        klog.Infof(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
+        klog.Info(log("blockMapper.stageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice..."))
         return "", nil
     }
     publishVolumeInfo := map[string]string{}
@@ -200,7 +200,7 @@ func (m *csiBlockMapper) stageVolumeForBlock(
         return "", err
     }

-    klog.V(4).Infof(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
+    klog.V(4).Info(log("blockMapper.stageVolumeForBlock successfully requested NodeStageVolume [%s]", stagingPath))
     return stagingPath, nil
 }
@@ -212,7 +212,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(
     csiSource *v1.CSIPersistentVolumeSource,
     attachment *storage.VolumeAttachment,
 ) (string, error) {
-    klog.V(4).Infof(log("blockMapper.publishVolumeForBlock called"))
+    klog.V(4).Info(log("blockMapper.publishVolumeForBlock called"))

     publishVolumeInfo := map[string]string{}
     if attachment != nil {
@@ -279,7 +279,7 @@ func (m *csiBlockMapper) publishVolumeForBlock(

 // SetUpDevice ensures the device is attached returns path where the device is located.
 func (m *csiBlockMapper) SetUpDevice() (string, error) {
-    klog.V(4).Infof(log("blockMapper.SetUpDevice called"))
+    klog.V(4).Info(log("blockMapper.SetUpDevice called"))

     // Get csiSource from spec
     if m.spec == nil {
@@ -341,7 +341,7 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) {
 }

 func (m *csiBlockMapper) MapPodDevice() (string, error) {
-    klog.V(4).Infof(log("blockMapper.MapPodDevice called"))
+    klog.V(4).Info(log("blockMapper.MapPodDevice called"))

     // Get csiSource from spec
     if m.spec == nil {
@@ -408,7 +408,7 @@ func (m *csiBlockMapper) unpublishVolumeForBlock(ctx context.Context, csi csiCli
     if err := csi.NodeUnpublishVolume(ctx, m.volumeID, publishPath); err != nil {
         return errors.New(log("blockMapper.unpublishVolumeForBlock failed: %v", err))
     }
-    klog.V(4).Infof(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))
+    klog.V(4).Info(log("blockMapper.unpublishVolumeForBlock NodeUnpublished successfully [%s]", publishPath))

     return nil
 }
@@ -421,7 +421,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien
         return errors.New(log("blockMapper.unstageVolumeForBlock failed to check STAGE_UNSTAGE_VOLUME capability: %v", err))
     }
     if !stageUnstageSet {
-        klog.Infof(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
+        klog.Info(log("blockMapper.unstageVolumeForBlock STAGE_UNSTAGE_VOLUME capability not set. Skipping unstageVolumeForBlock ..."))
         return nil
     }

@@ -431,7 +431,7 @@ func (m *csiBlockMapper) unstageVolumeForBlock(ctx context.Context, csi csiClien
     if err := csi.NodeUnstageVolume(ctx, m.volumeID, stagingPath); err != nil {
         return errors.New(log("blockMapper.unstageVolumeForBlock failed: %v", err))
     }
-    klog.V(4).Infof(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))
+    klog.V(4).Info(log("blockMapper.unstageVolumeForBlock NodeUnstageVolume successfully [%s]", stagingPath))

     // Remove stagingPath directory and its contents
     if err := os.RemoveAll(stagingPath); err != nil {
@@ -457,7 +457,7 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error
     stagingPath := m.GetStagingPath()
     if _, err := os.Stat(stagingPath); err != nil {
         if os.IsNotExist(err) {
-            klog.V(4).Infof(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
+            klog.V(4).Info(log("blockMapper.TearDownDevice stagingPath(%s) has already been deleted, skip calling NodeUnstageVolume", stagingPath))
         } else {
             return err
         }
@@ -101,7 +101,7 @@ func (c *csiMountMgr) SetUp(mounterArgs volume.MounterArgs) error {
 }

 func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
-    klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
+    klog.V(4).Info(log("Mounter.SetUpAt(%s)", dir))

     csi, err := c.csiClientGetter.Get()
     if err != nil {
@@ -346,7 +346,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
         klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *mounterArgs.FsGroup, c.volumeID))
     }

-    klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
+    klog.V(4).Info(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
     return nil
 }

@@ -358,7 +358,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) {
     csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
     if err != nil {
         if apierrors.IsNotFound(err) {
-            klog.V(5).Infof(log("CSIDriver %q not found, not adding service account token information", c.driverName))
+            klog.V(5).Info(log("CSIDriver %q not found, not adding service account token information", c.driverName))
             return nil, nil
         }
         return nil, err
@@ -394,7 +394,7 @@ func (c *csiMountMgr) podServiceAccountTokenAttrs() (map[string]string, error) {
         outputs[audience] = tr.Status
     }

-    klog.V(4).Infof(log("Fetched service account token attrs for CSIDriver %q", c.driverName))
+    klog.V(4).Info(log("Fetched service account token attrs for CSIDriver %q", c.driverName))
     tokens, _ := json.Marshal(outputs)
     return map[string]string{
         "csi.storage.k8s.io/serviceAccount.tokens": string(tokens),
@@ -416,7 +416,7 @@ func (c *csiMountMgr) TearDown() error {
     return c.TearDownAt(c.GetPath())
 }
 func (c *csiMountMgr) TearDownAt(dir string) error {
-    klog.V(4).Infof(log("Unmounter.TearDownAt(%s)", dir))
+    klog.V(4).Info(log("Unmounter.TearDownAt(%s)", dir))

     volID := c.volumeID
     csi, err := c.csiClientGetter.Get()
@@ -447,7 +447,7 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
     if err := removeMountDir(c.plugin, dir); err != nil {
         return errors.New(log("Unmounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err))
     }
-    klog.V(4).Infof(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir))
+    klog.V(4).Info(log("Unmounter.TearDownAt successfully unmounted dir [%s]", dir))

     return nil
 }
@@ -97,7 +97,7 @@ var PluginHandler = &RegistrationHandler{}
 // ValidatePlugin is called by kubelet's plugin watcher upon detection
 // of a new registration socket opened by CSI Driver registrar side car.
 func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
-	klog.Infof(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s",
+	klog.Info(log("Trying to validate a new CSI Driver with name: %s endpoint: %s versions: %s",
 		pluginName, endpoint, strings.Join(versions, ",")))
 
 	_, err := h.validateVersions("ValidatePlugin", pluginName, endpoint, versions)
@@ -110,7 +110,7 @@ func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string,
 
 // RegisterPlugin is called when a plugin can be registered
 func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string, versions []string, pluginClientTimeout *time.Duration) error {
-	klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
+	klog.Info(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint))
 
 	highestSupportedVersion, err := h.validateVersions("RegisterPlugin", pluginName, endpoint, versions)
 	if err != nil {
@@ -432,7 +432,7 @@ func (p *csiPlugin) NewMounter(
 }
 
 func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
-	klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
+	klog.V(4).Info(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID))
 
 	kvh, ok := p.host.(volume.KubeletVolumeHost)
 	if !ok {
@@ -697,7 +697,7 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod) (vo
 }
 
 func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
-	klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
+	klog.V(4).Info(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID))
 	unmapper := &csiBlockMapper{
 		plugin: p,
 		podUID: podUID,
@@ -839,7 +839,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) {
 	csiDriver, err := p.getCSIDriver(driverName)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
-			klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", driverName))
+			klog.V(4).Info(log("CSIDriver %q not found, not adding pod information", driverName))
 			return false, nil
 		}
 		return false, err
@@ -847,7 +847,7 @@ func (p *csiPlugin) podInfoEnabled(driverName string) (bool, error) {
 
 	// if PodInfoOnMount is not set or false we do not set pod attributes
 	if csiDriver.Spec.PodInfoOnMount == nil || *csiDriver.Spec.PodInfoOnMount == false {
-		klog.V(4).Infof(log("CSIDriver %q does not require pod information", driverName))
+		klog.V(4).Info(log("CSIDriver %q does not require pod information", driverName))
 		return false, nil
 	}
 	return true, nil

@@ -36,7 +36,7 @@ func (c *csiPlugin) RequiresFSResize() bool {
 }
 
 func (c *csiPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
-	klog.V(4).Infof(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath))
+	klog.V(4).Info(log("Expander.NodeExpand(%s)", resizeOptions.DeviceMountPath))
 	csiSource, err := getCSISourceFromSpec(resizeOptions.VolumeSpec)
 	if err != nil {
 		return false, errors.New(log("Expander.NodeExpand failed to get CSI persistent source: %v", err))

@@ -321,7 +321,7 @@ func (step stepName) getName() string { return step.name }
 func doVerifyLinesInFile(t *testing.T, volumePath, filename string, expected string) {
 	data, err := os.ReadFile(filepath.Join(volumePath, filename))
 	if err != nil {
-		t.Errorf(err.Error())
+		t.Error(err.Error())
 		return
 	}
 	actualStr := string(data)
@@ -357,7 +357,7 @@ type verifyMode struct {
 func (step verifyMode) run(test *downwardAPITest) {
 	fileInfo, err := os.Stat(filepath.Join(test.volumePath, step.name))
 	if err != nil {
-		test.t.Errorf(err.Error())
+		test.t.Error(err.Error())
 		return
 	}
 

@@ -266,7 +266,7 @@ func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) {
 		return nil, errors.New(status.Status)
 	} else if status.Status != StatusSuccess {
 		errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message)
-		klog.Errorf(errMsg)
+		klog.Error(errMsg)
 		return nil, fmt.Errorf("%s", errMsg)
 	}
 

@@ -366,7 +366,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
 	kubeClient := host.GetKubeClient()
 	if kubeClient == nil {
 		err := fmt.Errorf("failed to get kubeclient when creating portworx client")
-		klog.Errorf(err.Error())
+		klog.Error(err.Error())
 		return nil, err
 	}
 
@@ -379,7 +379,7 @@ func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
 
 	if svc == nil {
 		err = fmt.Errorf("service: %v not found. Consult Portworx docs to deploy it", pxServiceName)
-		klog.Errorf(err.Error())
+		klog.Error(err.Error())
 		return nil, err
 	}
 

@@ -17,6 +17,7 @@ limitations under the License.
 package secret
 
 import (
+	"errors"
 	"fmt"
 
 	"k8s.io/klog/v2"
@@ -24,7 +25,7 @@ import (
 	utilstrings "k8s.io/utils/strings"
 
 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
@@ -184,7 +185,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs
 	optional := b.source.Optional != nil && *b.source.Optional
 	secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName)
 	if err != nil {
-		if !(errors.IsNotFound(err) && optional) {
+		if !(apierrors.IsNotFound(err) && optional) {
 			klog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err)
 			return err
 		}
@@ -276,8 +277,8 @@ func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32,
 				continue
 			}
 			errMsg := fmt.Sprintf("references non-existent secret key: %s", ktp.Key)
-			klog.Errorf(errMsg)
-			return nil, fmt.Errorf(errMsg)
+			klog.Error(errMsg)
+			return nil, errors.New(errMsg)
 		}
 
 		fileProjection.Data = []byte(content)

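The secret.go hunks combine the two recurring fixes: the apimachinery errors package gets an explicit apierrors alias so the standard library errors package can be imported next to it, and a message that is already formatted is wrapped with errors.New instead of fmt.Errorf. A minimal sketch of the resulting shape (package and function names here are illustrative, not from the commit):

package example

import (
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// classify shows the import split: apierrors for API status checks,
// the standard errors package for building plain error values.
func classify(key string, err error) error {
	if apierrors.IsNotFound(err) {
		msg := fmt.Sprintf("references non-existent secret key: %s", key)
		return errors.New(msg) // fmt.Errorf(msg) would trip the vet printf check
	}
	return err
}
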
@@ -131,7 +131,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
 
 		if err != nil {
 			msg := ne.vmt.GenerateErrorDetailed("MountVolume.NodeExpandVolume failed to mark node expansion in progress: %v", err)
-			klog.Errorf(msg.Error())
+			klog.Error(msg.Error())
 			return false, err, testResponseData{}
 		}
 	}
@@ -143,12 +143,12 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
 			if volumetypes.IsInfeasibleError(resizeErr) || ne.markExpansionInfeasibleOnFailure {
 				ne.pvc, markFailedError = util.MarkNodeExpansionInfeasible(ne.pvc, ne.kubeClient, resizeErr)
 				if markFailedError != nil {
-					klog.Errorf(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
+					klog.Error(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
 				}
 			} else {
 				ne.pvc, markFailedError = util.MarkNodeExpansionFailedCondition(ne.pvc, ne.kubeClient, resizeErr)
 				if markFailedError != nil {
-					klog.Errorf(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
+					klog.Error(ne.vmt.GenerateErrorDetailed("MountMount.NodeExpandVolume failed to mark node expansion as failed: %v", err).Error())
 				}
 			}
 		}
@@ -158,7 +158,7 @@ func (ne *NodeExpander) expandOnPlugin() (bool, error, testResponseData) {
 		// expansion operation should not block mounting
 		if volumetypes.IsFailedPreconditionError(resizeErr) {
 			ne.actualStateOfWorld.MarkForInUseExpansionError(ne.vmt.VolumeName)
-			klog.Errorf(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error())
+			klog.Error(ne.vmt.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed with %v", resizeErr).Error())
 			return false, nil, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}
 		}
 		return false, resizeErr, testResponseData{assumeResizeFinished: true, resizeCalledOnPlugin: true}

@@ -370,13 +370,13 @@ func (volume *VolumeToAttach) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs
 
 // GenerateErrorDetailed returns detailed errors for volumes to attach
 func (volume *VolumeToAttach) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for volumes to attach
 func (volume *VolumeToAttach) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 // String combines key fields of the volume for logging in text format.
@@ -535,13 +535,13 @@ func (volume *VolumeToMount) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg
 
 // GenerateErrorDetailed returns detailed errors for volumes to mount
 func (volume *VolumeToMount) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for volumes to mount
 func (volume *VolumeToMount) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 // AttachedVolume represents a volume that is attached to a node.
@@ -597,13 +597,13 @@ func (volume *AttachedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMs
 
 // GenerateErrorDetailed returns detailed errors for attached volumes
 func (volume *AttachedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for attached volumes
 func (volume *AttachedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 // String combines key fields of the volume for logging in text format.
@@ -769,13 +769,13 @@ func (volume *MountedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg
 
 // GenerateErrorDetailed returns simple and detailed errors for mounted volumes
 func (volume *MountedVolume) GenerateErrorDetailed(prefixMsg string, err error) (detailedErr error) {
-	return fmt.Errorf(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
+	return errors.New(volume.GenerateMsgDetailed(prefixMsg, errSuffix(err)))
 }
 
 // GenerateError returns simple and detailed errors for mounted volumes
 func (volume *MountedVolume) GenerateError(prefixMsg string, err error) (simpleErr, detailedErr error) {
 	simpleMsg, detailedMsg := volume.GenerateMsg(prefixMsg, errSuffix(err))
-	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
+	return errors.New(simpleMsg), errors.New(detailedMsg)
 }
 
 type operationExecutor struct {

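The operation executor helpers above return errors built from messages that GenerateMsg/GenerateMsgDetailed have already formatted, which is why fmt.Errorf gives way to errors.New throughout. A hedged sketch of the helper shape (the function below is illustrative, not the actual Kubernetes code):

package example

import "errors"

// generateError mirrors the GenerateError helpers: both messages arrive fully
// formatted, so errors.New is the right constructor and avoids treating the
// message as a printf format string.
func generateError(simpleMsg, detailedMsg string) (simpleErr, detailedErr error) {
	return errors.New(simpleMsg), errors.New(detailedMsg)
}
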
@@ -197,7 +197,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc(
 		volumePlugin, err :=
 			og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec)
 		if err != nil || volumePlugin == nil {
-			klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
+			klog.Error(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error())
 			continue
 		}
 		volumeSpecList, pluginExists := volumesPerPlugin[volumePlugin.GetPluginName()]
@@ -314,7 +314,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
 		for _, pod := range volumeToAttach.ScheduledPods {
 			og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg)
 		}
-		klog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
+		klog.Info(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", ""))
 
 		// Update actual state of world
 		addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
@@ -434,7 +434,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
 
-		klog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
+		klog.Info(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", ""))
 
 		// Update actual state of world
 		actualStateOfWorld.MarkVolumeAsDetached(
@@ -647,7 +647,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
 			// Considering the above situations, we mark volume as uncertain here so that reconciler will trigger
 			// volume tear down when pod is deleted, and also makes sure pod will not start using it.
 			if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts); err != nil {
-				klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
+				klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
 			}
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
@@ -705,7 +705,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount,
 		// Only devices which were uncertain can be marked as unmounted
 		markDeviceUnmountError := actualStateOfWorld.MarkDeviceAsUnmounted(volumeToMount.VolumeName)
 		if markDeviceUnmountError != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUnmounted failed", markDeviceUnmountError).Error())
 		}
 		return
 	}
@@ -716,7 +716,7 @@ func (og *operationGenerator) markDeviceErrorState(volumeToMount VolumeToMount,
 		// which was previously marked as mounted here as uncertain.
 		markDeviceUncertainError := actualStateOfWorld.MarkDeviceAsUncertain(volumeToMount.VolumeName, devicePath, deviceMountPath, volumeToMount.SELinuxLabel)
 		if markDeviceUncertainError != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainError).Error())
 		}
 	}
 
@@ -734,7 +734,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount,
 
 		t := actualStateOfWorld.MarkVolumeAsUnmounted(volumeToMount.PodName, volumeToMount.VolumeName)
 		if t != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeAsUnmounted failed", t).Error())
 		}
 		return
 
@@ -744,7 +744,7 @@ func (og *operationGenerator) markVolumeErrorState(volumeToMount VolumeToMount,
 		actualStateOfWorld.GetVolumeMountState(volumeToMount.VolumeName, markOpts.PodName) == VolumeNotMounted {
 		t := actualStateOfWorld.MarkVolumeMountAsUncertain(markOpts)
 		if t != nil {
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", t).Error())
 		}
 	}
 }
@@ -792,7 +792,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
 			markMountUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(opts)
 			if markMountUncertainErr != nil {
 				// There is nothing else we can do. Hope that UnmountVolume will be re-tried shortly.
-				klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error())
+				klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeMountAsUncertain failed", markMountUncertainErr).Error())
 			}
 
 			// On failure, return error. Caller will log and retry.
@@ -815,7 +815,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
 			volumeToUnmount.PodName, volumeToUnmount.VolumeName)
 		if markVolMountedErr != nil {
 			// On failure, just log and exit
-			klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
+			klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error())
 		}
 
 		return volumetypes.NewOperationContext(nil, nil, migrated)
@@ -866,7 +866,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
 			// If the mount path could not be found, don't fail the unmount, but instead log a warning and proceed,
 			// using the value from deviceToDetach.DeviceMountPath, so that the device can be marked as unmounted
 			deviceMountPath = deviceToDetach.DeviceMountPath
-			klog.Warningf(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf(
+			klog.Warning(deviceToDetach.GenerateMsgDetailed(fmt.Sprintf(
 				"GetDeviceMountPath failed, but unmount operation will proceed using deviceMountPath=%s: %v", deviceMountPath, err), ""))
 		}
 		refs, err := deviceMountableVolumePlugin.GetDeviceMountRefs(deviceMountPath)
@@ -885,7 +885,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
 			markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
 			if markDeviceUncertainErr != nil {
 				// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
-				klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
+				klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
 			}
 
 			// On failure, return error. Caller will log and retry.
@@ -906,7 +906,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc(
 			markDeviceUncertainErr := actualStateOfWorld.MarkDeviceAsUncertain(deviceToDetach.VolumeName, deviceToDetach.DevicePath, deviceMountPath, deviceToDetach.SELinuxMountContext)
 			if markDeviceUncertainErr != nil {
 				// There is nothing else we can do. Hope that UnmountDevice will be re-tried shortly.
-				klog.Errorf(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
+				klog.Error(deviceToDetach.GenerateErrorDetailed("UnmountDevice.MarkDeviceAsUncertain failed", markDeviceUncertainErr).Error())
 			}
 			eventErr, detailedErr := deviceToDetach.GenerateError(
 				"UnmountDevice failed",
@@ -1151,7 +1151,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
 			// Considering the above situations, we mark volume as uncertain here so that reconciler will trigger
 			// volume tear down when pod is deleted, and also makes sure pod will not start using it.
 			if err := actualStateOfWorld.MarkVolumeMountAsUncertain(markVolumeOpts); err != nil {
-				klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
+				klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.MarkVolumeMountAsUncertain failed", err).Error())
 			}
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
@@ -1270,7 +1270,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc(
 			volumeToUnmount.PodName, volumeToUnmount.VolumeName)
 		if markVolUnmountedErr != nil {
 			// On failure, just log and exit
-			klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
+			klog.Error(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error())
 		}
 
 		return volumetypes.NewOperationContext(nil, nil, migrated)
@@ -1384,7 +1384,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
 			return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
 		}
 
-		klog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
+		klog.Info(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", ""))
 
 		// Update actual state of world
 		markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
@@ -1519,7 +1519,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
 	node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(volumeToDetach.NodeName), metav1.GetOptions{})
 	if fetchErr != nil {
 		if errors.IsNotFound(fetchErr) {
-			klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
+			klog.Warning(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", ""))
 			return nil
 		}
 
@@ -1536,7 +1536,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach(
 	}
 
 	// Volume is not marked as in use by node
	klog.Info(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", ""))
 	return nil
 }
 
@@ -1960,7 +1960,7 @@ func (og *operationGenerator) doOnlineExpansion(volumeToMount VolumeToMount,
 	resizeDone, err := og.nodeExpandVolume(volumeToMount, actualStateOfWorld, resizeOptions)
 	if err != nil {
 		e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.NodeExpandVolume failed", err)
-		klog.Errorf(e2.Error())
+		klog.Error(e2.Error())
 		return false, e1, e2
 	}
 	if resizeDone {
@@ -1991,7 +1991,7 @@ func (og *operationGenerator) expandVolumeDuringMount(volumeToMount VolumeToMoun
 	if pvcStatusCap.Cmp(pvSpecCap) < 0 {
 		if volumeToMount.VolumeSpec.ReadOnly {
 			simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
-			klog.Warningf(detailedMsg)
+			klog.Warning(detailedMsg)
 			og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 			og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 			return true, nil
@@ -2057,7 +2057,7 @@ func (og *operationGenerator) nodeExpandVolume(
 
 	if volumeToMount.VolumeSpec.ReadOnly {
 		simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
-		klog.Warningf(detailedMsg)
+		klog.Warning(detailedMsg)
 		og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 		og.recorder.Eventf(pvc, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
 		return true, nil
@@ -2097,7 +2097,7 @@ func (og *operationGenerator) checkForRecoveryFromExpansion(pvc *v1.PersistentVo
 	// and hence we are going to keep expanding using older logic.
 	if resizeStatus == "" && allocatedResource == nil {
 		_, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume running with", "older external resize controller")
-		klog.Warningf(detailedMsg)
+		klog.Warning(detailedMsg)
 		return false
 	}
 	return true
@@ -2139,7 +2139,7 @@ func (og *operationGenerator) legacyCallNodeExpandOnPlugin(resizeOp nodeResizeOp
 		// expansion operation should not block mounting
 		if volumetypes.IsFailedPreconditionError(resizeErr) {
 			actualStateOfWorld.MarkForInUseExpansionError(volumeToMount.VolumeName)
-			klog.Errorf(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
+			klog.Error(volumeToMount.GenerateErrorDetailed("MountVolume.NodeExapndVolume failed", resizeErr).Error())
 			return true, nil
 		}
 		return false, resizeErr

@@ -18,11 +18,12 @@ package recyclerclient
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 
 	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/watch"
@@ -72,7 +73,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
 	// Start the pod
 	_, err = recyclerClient.CreatePod(pod)
 	if err != nil {
-		if errors.IsAlreadyExists(err) {
+		if apierrors.IsAlreadyExists(err) {
 			deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
 			if deleteErr != nil {
 				return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
@@ -128,7 +129,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E
 			}
 			if pod.Status.Phase == v1.PodFailed {
 				if pod.Status.Message != "" {
-					return fmt.Errorf(pod.Status.Message)
+					return errors.New(pod.Status.Message)
 				}
 				return fmt.Errorf("pod failed, pod.Status.Message unknown")
 			}

@@ -259,7 +259,7 @@ func TestSafeMakeDir(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			base, err := ioutil.TempDir("", "safe-make-dir-"+test.name+"-")
 			if err != nil {
-				t.Fatalf(err.Error())
+				t.Fatal(err.Error())
 			}
 			defer os.RemoveAll(base)
 			test.prepare(base)
@@ -385,7 +385,7 @@ func TestRemoveEmptyDirs(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "remove-empty-dirs-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 		if err = test.prepare(base); err != nil {
 			os.RemoveAll(base)
@@ -615,7 +615,7 @@ func TestCleanSubPaths(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "clean-subpaths-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 		mounts, err := test.prepare(base)
 		if err != nil {
@@ -872,7 +872,7 @@ func TestBindSubPath(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "bind-subpath-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 
 		mounts, volPath, subPath, err := test.prepare(base)
@@ -986,7 +986,7 @@ func TestSubpath_PrepareSafeSubpath(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "bind-subpath-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 		defer os.RemoveAll(base)
 
@@ -1220,7 +1220,7 @@ func TestSafeOpen(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "safe-open-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 
 		test.prepare(base)
@@ -1367,7 +1367,7 @@ func TestFindExistingPrefix(t *testing.T) {
 		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "find-prefix-"+test.name+"-")
 		if err != nil {
-			t.Fatalf(err.Error())
+			t.Fatal(err.Error())
 		}
 		test.prepare(base)
 		path := filepath.Join(base, test.path)

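The subpath test hunks repeat one more variant of the same vet warning: testing.T's ...f methods are printf-like, so a dynamic error message must either go to the non-formatting method or be paired with a constant format. A minimal sketch under that assumption (the test itself is illustrative, not from the commit):

package example

import (
	"os"
	"testing"
)

func TestTempDir(t *testing.T) {
	base, err := os.MkdirTemp("", "example-")
	if err != nil {
		// t.Fatalf(err.Error()) is flagged by go vet; either fix works:
		//   t.Fatalf("%s", err)  - keep Fatalf with a constant format
		t.Fatal(err.Error()) //  - or use the non-formatting variant
	}
	defer os.RemoveAll(base)
}
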
@@ -620,7 +620,7 @@ func TestValidateCustomResource(t *testing.T) {
 				}
 				errs, _ := celValidator.Validate(context.TODO(), nil, structural, obj, oldObject, celconfig.RuntimeCELCostBudget)
 				if len(errs) > 0 {
-					t.Errorf(errs.ToAggregate().Error())
+					t.Error(errs.ToAggregate().Error())
 				}
 			}
 			for i, failingObject := range tt.failingObjects {

@@ -60,7 +60,7 @@ func TestLazyMapType(t *testing.T) {
 		evalCounter++
 		v, err := compileAndRun(env, activation, `{"a": "a"}`)
 		if err != nil {
-			return types.NewErr(err.Error())
+			return types.NewErr("%s", err.Error())
 		}
 		return v
 	})

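The cel-go hunk takes the other route: types.NewErr stays printf-style, so the fix is to supply a constant "%s" format and pass the dynamic message as an argument rather than as the format itself. A small sketch, assuming github.com/google/cel-go/common/types with its usual NewErr(format string, args ...interface{}) signature:

package example

import (
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

// wrapErr keeps the formatting API but gives vet a constant format string.
func wrapErr(err error) ref.Val {
	return types.NewErr("%s", err.Error())
}
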
@@ -2818,7 +2818,7 @@ func TestDeleteWithOptions(t *testing.T) {
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
-		t.Logf(string(s))
+		t.Log(string(s))
 	}
 	if simpleStorage.deleted != ID {
 		t.Errorf("Unexpected delete: %s, expected %s", simpleStorage.deleted, ID)
@@ -2858,7 +2858,7 @@ func TestDeleteWithOptionsQuery(t *testing.T) {
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
-		t.Logf(string(s))
+		t.Log(string(s))
 	}
 	if simpleStorage.deleted != ID {
 		t.Fatalf("Unexpected delete: %s, expected %s", simpleStorage.deleted, ID)
@@ -2901,7 +2901,7 @@ func TestDeleteWithOptionsQueryAndBody(t *testing.T) {
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
-		t.Logf(string(s))
+		t.Log(string(s))
 	}
 	if simpleStorage.deleted != ID {
 		t.Errorf("Unexpected delete: %s, expected %s", simpleStorage.deleted, ID)

@@ -326,7 +326,7 @@ func TestSerializeObject(t *testing.T) {
 			compressionEnabled: true,
 			statusCode: http.StatusInternalServerError,
 			out: smallPayload,
-			outErrs: []error{fmt.Errorf(string(largePayload)), fmt.Errorf("bad2")},
+			outErrs: []error{errors.New(string(largePayload)), errors.New("bad2")},
 			mediaType: "application/json",
 			req: &http.Request{
 				Header: http.Header{

@@ -50,7 +50,7 @@ var (
 func handleError(w http.ResponseWriter, r *http.Request, err error) {
 	errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI)
 	http.Error(w, errorMsg, http.StatusInternalServerError)
-	klog.Errorf(err.Error())
+	klog.Error(err.Error())
 }
 
 // requestWatermark is used to track maximal numbers of requests in a particular phase of handling

@@ -1603,7 +1603,7 @@ func verifyEvents(t *testing.T, w watch.Interface, events []watch.Event, strictO
 	if !valid {
 		t.Logf("(called from line %d)", line)
 		for _, err := range errors {
-			t.Errorf(err)
+			t.Error(err)
 		}
 	}
 }

@@ -176,7 +176,7 @@ func (h *peerProxyHandler) WrapHandler(handler http.Handler) http.Handler {
 		// TODO: maintain locally serviceable GVRs somewhere so that we dont have to
 		// consult the storageversion-informed map for those
 		if len(serviceableByResp.peerEndpoints) == 0 {
-			klog.Errorf(fmt.Sprintf("GVR %v is not served by anything in this cluster", gvr))
+			klog.Error(fmt.Sprintf("GVR %v is not served by anything in this cluster", gvr))
 			handler.ServeHTTP(w, r)
 			return
 		}

@@ -19,6 +19,7 @@ package printers
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"reflect"
@@ -36,13 +37,13 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
 	// we need an actual value in order to retrieve the package path for an object.
 	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
 	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
-		return fmt.Errorf(InternalObjectPrinterErr)
+		return errors.New(InternalObjectPrinterErr)
 	}
 
 	switch obj := obj.(type) {
 	case *metav1.WatchEvent:
 		if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) {
-			return fmt.Errorf(InternalObjectPrinterErr)
+			return errors.New(InternalObjectPrinterErr)
 		}
 		data, err := json.Marshal(obj)
 		if err != nil {

@@ -19,6 +19,7 @@ package printers
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"reflect"
@@ -119,7 +120,7 @@ func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
 	// we need an actual value in order to retrieve the package path for an object.
 	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
 	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
-		return fmt.Errorf(InternalObjectPrinterErr)
+		return errors.New(InternalObjectPrinterErr)
 	}
 
 	var queryObj interface{} = obj

@@ -17,6 +17,7 @@ limitations under the License.
 package printers
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"reflect"
@@ -52,7 +53,7 @@ func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
 	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
 	// we need an actual value in order to retrieve the package path for an object.
 	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
-		return fmt.Errorf(InternalObjectPrinterErr)
+		return errors.New(InternalObjectPrinterErr)
 	}
 
 	if meta.IsListType(obj) {

@ -18,6 +18,7 @@ package printers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"reflect"
|
"reflect"
|
||||||
@ -61,7 +62,7 @@ func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) {
|
|||||||
// PrintObj formats the obj with the Go Template.
|
// PrintObj formats the obj with the Go Template.
|
||||||
func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
|
func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
|
||||||
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
|
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
|
||||||
return fmt.Errorf(InternalObjectPrinterErr)
|
return errors.New(InternalObjectPrinterErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
var data []byte
|
var data []byte
|
||||||
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||||||
package printers
|
package printers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"reflect"
|
"reflect"
|
||||||
@ -42,7 +43,7 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
|
|||||||
// we need an actual value in order to retrieve the package path for an object.
|
// we need an actual value in order to retrieve the package path for an object.
|
||||||
// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
|
// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
|
||||||
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
|
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
|
||||||
return fmt.Errorf(InternalObjectPrinterErr)
|
return errors.New(InternalObjectPrinterErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
count := atomic.AddInt64(&p.printCount, 1)
|
count := atomic.AddInt64(&p.printCount, 1)
|
||||||
@ -55,7 +56,7 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
|
|||||||
switch obj := obj.(type) {
|
switch obj := obj.(type) {
|
||||||
case *metav1.WatchEvent:
|
case *metav1.WatchEvent:
|
||||||
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) {
|
if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) {
|
||||||
return fmt.Errorf(InternalObjectPrinterErr)
|
return errors.New(InternalObjectPrinterErr)
|
||||||
}
|
}
|
||||||
data, err := yaml.Marshal(obj)
|
data, err := yaml.Marshal(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
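
The printer hunks above all make the same class of change. A rough illustrative sketch (not code from this commit; buildErr, buildErrFixed, and msg are hypothetical) of why the printf check in go vet, which this cherry-pick is fixing for newer Go toolchains, objects to the old form and accepts the new one:

package main

import (
	"errors"
	"fmt"
)

// buildErr mimics the old pattern: msg is not a compile-time constant, so
// go vet reports a non-constant format string, and any '%' inside msg would
// be parsed as a formatting verb.
func buildErr(msg string) error {
	return fmt.Errorf(msg) // flagged by go vet
}

// buildErrFixed mirrors the replacement used in the hunks above: the message
// is taken verbatim, so vet is satisfied and '%' characters are preserved.
func buildErrFixed(msg string) error {
	return errors.New(msg)
}

func main() {
	fmt.Println(buildErr("disk 100% full"))      // may print formatting artifacts
	fmt.Println(buildErrFixed("disk 100% full")) // prints the message as-is
}
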
@@ -1030,7 +1030,7 @@ func (b *Builder) visitByResource() *Result {
if b.allNamespace {
errMsg = "a resource cannot be retrieved by name across all namespaces"
}
-return result.withError(fmt.Errorf(errMsg))
+return result.withError(errors.New(errMsg))
}
}

@@ -1093,7 +1093,7 @@ func (b *Builder) visitByName() *Result {
if b.allNamespace {
errMsg = "a resource cannot be retrieved by name across all namespaces"
}
-return result.withError(fmt.Errorf(errMsg))
+return result.withError(errors.New(errMsg))
}
}

@@ -1873,7 +1873,7 @@ func TestHasNames(t *testing.T) {
name: "test8",
args: []string{"rc/foo", "bar"},
expectedHasName: false,
-expectedError: fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '" + basename + " get resource/<resource_name>' instead of '" + basename + " get resource resource/<resource_name>'"),
+expectedError: errors.New("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '" + basename + " get resource/<resource_name>' instead of '" + basename + " get resource resource/<resource_name>'"),
},
}
for _, tt := range tests {

@@ -299,7 +299,7 @@ func TestHTTPProxy(t *testing.T) {
}))
defer testProxyServer.Close()

-t.Logf(testProxyServer.URL)
+t.Log(testProxyServer.URL)

u, err := url.Parse(testProxyServer.URL)
if err != nil {

@@ -115,5 +115,5 @@ func (d *errorDecoderV4) decode(message []byte) error {
return errors.New("error stream protocol error: unknown error")
}

-return fmt.Errorf(status.Message)
+return errors.New(status.Message)
}

@@ -159,7 +159,7 @@ func TestTLSConfigKey(t *testing.T) {

shouldCacheA := valueA.Proxy == nil
if shouldCacheA != canCacheA {
-t.Errorf("Unexpected canCache=false for " + nameA)
+t.Error("Unexpected canCache=false for " + nameA)
}

configIsNotEmpty := !reflect.DeepEqual(*valueA, Config{})

@@ -138,7 +138,7 @@ func (h WebhookHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
admissionResponse, err = h.AdmissionHandler(in.Request)
if err != nil {
e := fmt.Sprintf("error generating admission response: %v", err)
-klog.Errorf(e)
+klog.Error(e)
statusCode = http.StatusInternalServerError
http.Error(w, e, statusCode)
return

@@ -312,7 +312,7 @@ func (rc *RouteController) reconcile(ctx context.Context, nodes []*v1.Node, rout
UID: types.UID(nodeName),
Namespace: "",
}, v1.EventTypeWarning, "FailedToCreateRoute", msg)
-klog.V(4).Infof(msg)
+klog.V(4).Info(msg)
return err
}
}
@@ -198,7 +198,7 @@ func TestReadLogs(t *testing.T) {
err = ReadLogs(context.TODO(), nil, file.Name(), containerID, opts, fakeRuntimeService, stdoutBuf, stderrBuf)

if err != nil {
-t.Fatalf(err.Error())
+t.Fatal(err.Error())
}
if stderrBuf.Len() > 0 {
t.Fatalf("Stderr: %v", stderrBuf.String())

@@ -206,7 +206,7 @@ func (o *AttachOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s

o.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd)
if err != nil {
-return cmdutil.UsageErrorf(cmd, err.Error())
+return cmdutil.UsageErrorf(cmd, "%s", err.Error())
}

o.Builder = f.NewBuilder

@@ -988,6 +988,6 @@ func cmpFileData(t *testing.T, filePath, data string) {
type testWriter testing.T

func (t *testWriter) Write(p []byte) (n int, err error) {
-t.Logf(string(p))
+t.Log(string(p))
return len(p), nil
}

@@ -18,6 +18,7 @@ package create

import (
"context"
+"errors"
"fmt"
"strconv"
"strings"
@@ -399,12 +400,12 @@ func parsePorts(portString string) (int32, intstr.IntOrString, error) {
var targetPort intstr.IntOrString
if portNum, err := strconv.Atoi(portStringSlice[1]); err != nil {
if errs := validation.IsValidPortName(portStringSlice[1]); len(errs) != 0 {
-return 0, intstr.FromInt32(0), fmt.Errorf(strings.Join(errs, ","))
+return 0, intstr.FromInt32(0), errors.New(strings.Join(errs, ","))
}
targetPort = intstr.FromString(portStringSlice[1])
} else {
if errs := validation.IsValidPortNum(portNum); len(errs) != 0 {
-return 0, intstr.FromInt32(0), fmt.Errorf(strings.Join(errs, ","))
+return 0, intstr.FromInt32(0), errors.New(strings.Join(errs, ","))
}
targetPort = intstr.FromInt32(int32(portNum))
}

@@ -531,7 +531,7 @@ func (o *DeleteOptions) confirmation(infos []*resource.Info) bool {

fmt.Fprintf(o.Out, "%s/%s\n", kindString, info.Name)
}
-fmt.Fprintf(o.Out, i18n.T("Do you want to continue?")+" (y/n): ")
+fmt.Fprint(o.Out, i18n.T("Do you want to continue?")+" (y/n): ")
var input string
_, err := fmt.Fscan(o.In, &input)
if err != nil {

@@ -161,7 +161,7 @@ func NewCmdDiff(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Co
// command it means changes were found.
// Thus, it should return status code greater than 1.
cmd.SetFlagErrorFunc(func(command *cobra.Command, err error) error {
-cmdutil.CheckDiffErr(cmdutil.UsageErrorf(cmd, err.Error()))
+cmdutil.CheckDiffErr(cmdutil.UsageErrorf(cmd, "%s", err.Error()))
return nil
})

@@ -243,7 +243,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [
var err error

if len(args) == 0 && !cmd.Flags().Changed("selector") {
-return cmdutil.UsageErrorf(cmd, fmt.Sprintf("USAGE: %s [flags]", cmd.Use))
+return cmdutil.UsageErrorf(cmd, "USAGE: %s [flags]", cmd.Use)
}
if len(args) > 0 && len(o.drainer.Selector) > 0 {
return cmdutil.UsageErrorf(cmd, "error: cannot specify both a node name and a --selector option")

@@ -216,7 +216,7 @@ func (p *ExecOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, argsIn []s

p.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd)
if err != nil {
-return cmdutil.UsageErrorf(cmd, err.Error())
+return cmdutil.UsageErrorf(cmd, "%s", err.Error())
}

p.Builder = f.NewBuilder
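
Several kubectl hunks in this range route err.Error() through an explicit "%s" verb instead of letting it act as the format string. A minimal sketch of the difference, with usageErrorf as a hypothetical stand-in for printf-style helpers such as cmdutil.UsageErrorf:

package main

import "fmt"

// usageErrorf is a hypothetical stand-in for printf-style helpers: the first
// argument is treated as a format string.
func usageErrorf(format string, args ...interface{}) error {
	return fmt.Errorf("see usage: %s", fmt.Sprintf(format, args...))
}

func main() {
	err := fmt.Errorf("invalid value %q for flag", "50%")

	// Old pattern: the error text becomes the format string, so the '%' it
	// contains is reinterpreted and go vet flags the non-constant format.
	fmt.Println(usageErrorf(err.Error()))

	// Fixed pattern, as in the hunks above: pass the text as data.
	fmt.Println(usageErrorf("%s", err.Error()))
}
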
@@ -19,6 +19,7 @@ package get
import (
"bufio"
"bytes"
+"errors"
"fmt"
"io"
"reflect"
@@ -161,7 +162,7 @@ func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error
// we need an actual value in order to retrieve the package path for an object.
// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
-return fmt.Errorf(printers.InternalObjectPrinterErr)
+return errors.New(printers.InternalObjectPrinterErr)
}

if _, found := out.(*tabwriter.Writer); !found {
@@ -210,7 +211,7 @@ func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jso
switch u := obj.(type) {
case *metav1.WatchEvent:
if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(u.Object.Object)).Type().PkgPath()) {
-return fmt.Errorf(printers.InternalObjectPrinterErr)
+return errors.New(printers.InternalObjectPrinterErr)
}
unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(u.Object.Object)
if err != nil {

@@ -290,7 +290,7 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri
usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
}

-return cmdutil.UsageErrorf(cmd, usageString)
+return cmdutil.UsageErrorf(cmd, "%s", usageString)
}
}

@@ -331,7 +331,7 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg

getPodTimeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd)
if err != nil {
-return cmdutil.UsageErrorf(cmd, err.Error())
+return cmdutil.UsageErrorf(cmd, "%s", err.Error())
}

resourceName := args[0]

@@ -183,7 +183,7 @@ func (o *TaintOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st
}

if o.taintsToAdd, o.taintsToRemove, err = parseTaints(taintArgs); err != nil {
-return cmdutil.UsageErrorf(cmd, err.Error())
+return cmdutil.UsageErrorf(cmd, "%s", err.Error())
}
o.builder = f.NewBuilder().
WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).

@@ -243,7 +243,7 @@ func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate {
// commands.
func StandardErrorMessage(err error) (string, bool) {
if debugErr, ok := err.(debugError); ok {
-klog.V(4).Infof(debugErr.DebugError())
+klog.V(4).Info(debugErr.DebugError())
}
status, isStatus := err.(apierrors.APIStatus)
switch {

@@ -3521,8 +3521,8 @@ Events: <none>
t.Errorf("unexpected error: %v", err)
}
if out != test.output {
-t.Logf(out)
-t.Logf(test.output)
+t.Log(out)
+t.Log(test.output)
t.Errorf("expected: \n%q\n but got output: \n%q\n", test.output, out)
}
})
@@ -5175,7 +5175,7 @@ Parameters:
t.Errorf("unexpected error: %v", err)
}
if out != expectedOut {
-t.Logf(out)
+t.Log(out)
t.Errorf("expected : %q\n but got output:\n %q", test.output, out)
}
})
@@ -6354,7 +6354,7 @@ Events: <none>` + "\n",
t.Errorf("unexpected error: %v", err)
}
if out != tc.output {
-t.Logf(out)
+t.Log(out)
t.Errorf("expected :\n%s\nbut got output:\n%s", tc.output, out)
}
})

@@ -62,7 +62,7 @@ func (p *HelpFlagPrinter) PrintHelpFlag(flag *flag.Flag) {
}
appendTabStr := strings.ReplaceAll(wrappedStr, "\n", "\n\t")

-fmt.Fprintf(p.out, appendTabStr+"\n\n")
+fmt.Fprint(p.out, appendTabStr+"\n\n")
}

// writeFlag will output the help flag based

@@ -623,7 +623,7 @@ func (mounter *SafeFormatAndMount) formatAndMountSensitive(source string, target
sensitiveOptionsLog := sanitizedOptionsForLogging(options, sensitiveOptions)
detailedErr := fmt.Sprintf("format of disk %q failed: type:(%q) target:(%q) options:(%q) errcode:(%v) output:(%v) ", source, fstype, target, sensitiveOptionsLog, err, string(output))
klog.Error(detailedErr)
-return NewMountError(FormatFailed, detailedErr)
+return NewMountError(FormatFailed, "%s", detailedErr)
}

klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
@@ -646,7 +646,7 @@ func (mounter *SafeFormatAndMount) formatAndMountSensitive(source string, target
// Mount the disk
klog.V(4).Infof("Attempting to mount disk %s in %s format at %s", source, fstype, target)
if err := mounter.MountSensitive(source, target, fstype, options, sensitiveOptions); err != nil {
-return NewMountError(mountErrorValue, err.Error())
+return NewMountError(mountErrorValue, "%s", err.Error())
}

return nil
@@ -553,13 +553,13 @@ func TestSampleAPIServer(ctx context.Context, f *framework.Framework, aggrclient
locatedWardle := false
for _, item := range list.Items {
if item.Name == apiServiceName {
-framework.Logf("Found " + apiServiceName + " in APIServiceList")
+framework.Logf("Found %s in APIServiceList", apiServiceName)
locatedWardle = true
break
}
}
if !locatedWardle {
-framework.Failf("Unable to find " + apiServiceName + " in APIServiceList")
+framework.Failf("Unable to find %s in APIServiceList", apiServiceName)
}

// As the APIService doesn't have any labels currently set we need to
@@ -773,7 +773,7 @@ func validateErrorWithDebugInfo(ctx context.Context, f *framework.Framework, err
msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
}

-framework.Failf(msg)
+framework.Fail(msg)
}
}

@@ -262,7 +262,7 @@ func gatherMetrics(ctx context.Context, f *framework.Framework) {
framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
} else {
summary = (*e2emetrics.ComponentCollection)(&received)
-framework.Logf(summary.PrintHumanReadable())
+framework.Logf("%s", summary.PrintHumanReadable())
}
}
}

@@ -2012,7 +2012,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", z.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
-framework.Logf(e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
+framework.Logf("%s", e2ekubectl.RunKubectlOrDie(z.ss.Namespace, "exec", name, "--", "/bin/sh", "-c", cmd))
}
}

@@ -2047,7 +2047,7 @@ func (m *mysqlGaleraTester) deploy(ctx context.Context, ns string) *appsv1.State
"create database statefulset;",
"use statefulset; create table foo (k varchar(20), v varchar(20));",
} {
-framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
+framework.Logf("%s", m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
}
return m.ss
}
@@ -2056,7 +2056,7 @@ func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
-framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
+framework.Logf(cmd, m.mysqlExec(cmd, m.ss.Namespace, name))
}
}

@@ -2087,7 +2087,7 @@ func (m *redisTester) deploy(ctx context.Context, ns string) *appsv1.StatefulSet
func (m *redisTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv {
-framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
+framework.Logf("%s", m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
}
}

@@ -2117,7 +2117,7 @@ func (c *cockroachDBTester) deploy(ctx context.Context, ns string) *appsv1.State
"CREATE DATABASE IF NOT EXISTS foo;",
"CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);",
} {
-framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
+framework.Logf("%s", c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
}
return c.ss
}
@@ -2126,7 +2126,7 @@ func (c *cockroachDBTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v)
-framework.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name))
+framework.Logf("%s", c.cockroachDBExec(cmd, c.ss.Namespace, name))
}
}
func (c *cockroachDBTester) read(statefulPodIndex int, key string) string {
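
The e2e hunks above wrap command output in an explicit "%s" when logging it. A small sketch of the same idea, using the standard library's log.Printf in place of framework.Logf (which is assumed to behave like a printf-style logger); commandOutput is a hypothetical stand-in for helpers such as e2ekubectl.RunKubectlOrDie:

package main

import "log"

// commandOutput stands in for command helpers: real command output can
// legitimately contain '%' characters.
func commandOutput() string {
	return "progress: 100% done"
}

func main() {
	out := commandOutput()

	// Old pattern: the output is used as the format string, which go vet
	// flags and which misparses any '%' in it.
	log.Printf(out)

	// Fixed pattern from the hunks above: constant format, output as data.
	log.Printf("%s", out)
}
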
@@ -110,7 +110,7 @@ func traceRouteToControlPlane() {
cmd := exec.Command(traceroute, "-I", framework.APIAddress())
out, err := cmd.Output()
if len(out) != 0 {
-framework.Logf(string(out))
+framework.Logf("%s", string(out))
}
if exiterr, ok := err.(*exec.ExitError); err != nil && ok {
framework.Logf("Error while running traceroute: %s", exiterr.Stderr)

@@ -40,7 +40,7 @@ import (
)

func addMasterReplica(zone string) error {
-framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+framework.Logf("Adding a new master replica, zone: %s", zone)
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
if err != nil {
return err
@@ -49,7 +49,7 @@ func addMasterReplica(zone string) error {
}

func removeMasterReplica(zone string) error {
-framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+framework.Logf("Removing an existing master replica, zone: %s", zone)
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
if err != nil {
return err
@@ -58,7 +58,7 @@ func removeMasterReplica(zone string) error {
}

func addWorkerNodes(zone string) error {
-framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+framework.Logf("Adding worker nodes, zone: %s", zone)
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
if err != nil {
return err
@@ -67,7 +67,7 @@ func addWorkerNodes(zone string) error {
}

func removeWorkerNodes(zone string) error {
-framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+framework.Logf("Removing worker nodes, zone: %s", zone)
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
if err != nil {
return err

@@ -21,6 +21,7 @@ import (
"bytes"
"context"
"encoding/json"
+"errors"
"fmt"
"math"
"regexp"
@@ -595,7 +596,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
}
}
if len(violatedConstraints) > 0 {
-return &summary, fmt.Errorf(strings.Join(violatedConstraints, "\n"))
+return &summary, errors.New(strings.Join(violatedConstraints, "\n"))
}
return &summary, nil
}

@@ -57,7 +57,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
-Logf(msg)
+Logf("%s", msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)

@@ -311,7 +311,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
-Logf(summaries[i].PrintHumanReadable())
+Logf("%s", summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
@@ -393,7 +393,7 @@ func (f *Framework) AfterEach(ctx context.Context) {
for namespaceKey, namespaceErr := range nsDeletionErrors {
messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
}
-Failf(strings.Join(messages, ","))
+Fail(strings.Join(messages, ","))
}
}()

@@ -560,7 +560,7 @@ func DescribeIng(ns string) {
framework.Logf("\nOutput of kubectl describe ing:\n")
desc, _ := e2ekubectl.RunKubectl(
ns, "describe", "ing")
-framework.Logf(desc)
+framework.Logf("%s", desc)
}

// Update retrieves the ingress, performs the passed function, and then updates it.
@@ -829,7 +829,7 @@ func (j *TestJig) VerifyURL(ctx context.Context, route, host string, iterations
for i := 0; i < iterations; i++ {
b, err := SimpleGET(ctx, httpClient, route, host)
if err != nil {
-framework.Logf(b)
+framework.Logf("%s", b)
return err
}
j.Logger.Infof("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)

@@ -255,7 +255,7 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
framework.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := e2ekubectl.RunKubectl(
e.Namespace, "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
-framework.Logf(desc)
+framework.Logf("%s", desc)
}
}

@@ -554,12 +554,12 @@ func (config *NetworkingTestConfig) executeCurlCmd(ctx context.Context, cmd stri
stdout, err := e2epodoutput.RunHostCmd(config.Namespace, podName, cmd)
if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
-framework.Logf(msg)
+framework.Logf("%s", msg)
return false, nil
}
if !strings.Contains(stdout, expected) {
msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
-framework.Logf(msg)
+framework.Logf("%s", msg)
return false, nil
}
return true, nil

@@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
}
if !silent {
-framework.Logf(msg)
+framework.Logf("%s", msg)
}
return false
}
@@ -822,6 +822,6 @@ func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
if taintExists(nodeUpdated.Spec.Taints, taint) {
-framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
+framework.Fail("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}

@@ -46,10 +46,13 @@ func Skipf(format string, args ...interface{}) {
panic("unreachable")
}

+// Skip is an alias for ginkgo.Skip.
+var Skip = ginkgo.Skip
+
// SkipUnlessAtLeast skips if the value is less than the minValue.
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
-skipInternalf(1, message)
+skipInternalf(1, "%s", message)
}
}

@@ -50,11 +50,11 @@ func CreateStatefulSet(ctx context.Context, c clientset.Interface, manifestPath,
svc, err := e2emanifest.SvcFromManifest(mkpath("service.yaml"))
framework.ExpectNoError(err)

-framework.Logf(fmt.Sprintf("creating " + ss.Name + " service"))
+framework.Logf("creating %s service", ss.Name)
_, err = c.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{})
framework.ExpectNoError(err)

-framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector))
+framework.Logf("creating statefulset %v/%v with %d replicas and selector %+v", ss.Namespace, ss.Name, *(ss.Spec.Replicas), ss.Spec.Selector)
_, err = c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
WaitForRunningAndReady(ctx, c, *ss.Spec.Replicas, ss)
@@ -221,7 +221,7 @@ func assertCleanup(ns string, selectors ...string) {
}
err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
if err != nil {
-framework.Failf(e.Error())
+framework.Fail(e.Error())
}
}

@@ -396,7 +396,7 @@ var _ = SIGDescribe("Kubectl client", func() {
})
ginkgo.By("creating all guestbook components")
forEachGBFile(func(contents string) {
-framework.Logf(contents)
+framework.Logf("%s", contents)
e2ekubectl.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
})

@@ -1630,7 +1630,7 @@ metadata:
ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
output := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if !strings.Contains(output, labelValue) {
-framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
+framework.Fail("Failed updating label " + labelName + " to the pod " + pausePodName)
}

ginkgo.By("removing the label " + labelName + " of a pod")
@@ -1638,7 +1638,7 @@ metadata:
ginkgo.By("verifying the pod doesn't have the label " + labelName)
output = e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
if strings.Contains(output, labelValue) {
-framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
+framework.Fail("Failed removing label " + labelName + " of the pod " + pausePodName)
}
})
})
@@ -1915,7 +1915,7 @@ metadata:
ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
-framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
+framework.Fail("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
}
})

@@ -1983,7 +1983,7 @@ metadata:
ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) {
-framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
+framework.Fail("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
}
})
})
@@ -2330,7 +2330,7 @@ const applyTestLabel = "kubectl.kubernetes.io/apply-test"
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
-framework.Failf(err.Error())
+framework.Fail(err.Error())
}

return &rc

@@ -18,6 +18,7 @@ package network

import (
"context"
+"errors"
"fmt"
"regexp"
"strings"
@@ -123,7 +124,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
case "cluster-dns-ipv6":
cmd = append(cmd, "AAAA")
default:
-panic(fmt.Errorf("invalid target: " + target))
+panic(errors.New("invalid target: " + target))
}
cmd = append(cmd, dnsName)

@@ -271,7 +271,7 @@ var _ = common.SIGDescribe("Proxy", func() {
framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
}

-framework.Failf(strings.Join(errs, "\n"))
+framework.Fail(strings.Join(errs, "\n"))
}
})

@@ -263,7 +263,7 @@ func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds b

func checkAffinityFailed(tracker affinityTracker, err string) {
framework.Logf("%v", tracker.hostTrace)
-framework.Failf(err)
+framework.Fail(err)
}

// StartServeHostnameService creates a replication controller that serves its

@@ -102,7 +102,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
}
if n < 2 {
failing.Insert("Less than two runs succeeded; aborting.")
-framework.Failf(strings.Join(failing.List(), "\n"))
+framework.Fail(strings.Join(failing.List(), "\n"))
}
percentile := func(p int) time.Duration {
est := n * p / 100
@@ -129,7 +129,7 @@ var _ = common.SIGDescribe("Service endpoints latency", func() {
if failing.Len() > 0 {
errList := strings.Join(failing.List(), "\n")
helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
-framework.Failf(errList + helpfulInfo)
+framework.Fail(errList + helpfulInfo)
}
})
})

@@ -81,7 +81,7 @@ func DescribeSvc(ns string) {
framework.Logf("\nOutput of kubectl describe svc:\n")
desc, _ := e2ekubectl.RunKubectl(
ns, "describe", "svc", fmt.Sprintf("--namespace=%v", ns))
-framework.Logf(desc)
+framework.Logf("%s", desc)
}

// CheckSCTPModuleLoadedOnNodes checks whether any node on the list has the

@@ -297,7 +297,7 @@ var _ = SIGDescribe("LimitRange", func() {
lrNamespace, err := f.CreateNamespace(ctx, lrName, nil)
framework.ExpectNoError(err, "failed creating Namespace")
framework.Logf("Namespace %q created", lrNamespace.ObjectMeta.Name)
-framework.Logf(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name))
+framework.Logf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name)
_, err = f.ClientSet.CoreV1().LimitRanges(lrNamespace.ObjectMeta.Name).Create(ctx, limitRange2, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create limitRange %q in %q namespace", lrName, lrNamespace.ObjectMeta.Name)

|
@ -112,7 +112,7 @@ func (s *service) CreateVolume(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("CreateVolumeEnd"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("CreateVolumeEnd"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &csi.CreateVolumeResponse{Volume: &v}, nil
|
return &csi.CreateVolumeResponse{Volume: &v}, nil
|
||||||
@ -132,7 +132,7 @@ func (s *service) DeleteVolume(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("DeleteVolumeStart"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("DeleteVolumeStart"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the volume does not exist then return an idempotent response.
|
// If the volume does not exist then return an idempotent response.
|
||||||
@ -150,7 +150,7 @@ func (s *service) DeleteVolume(
|
|||||||
klog.V(5).InfoS("mock delete volume", "volumeID", req.VolumeId)
|
klog.V(5).InfoS("mock delete volume", "volumeID", req.VolumeId)
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("DeleteVolumeEnd"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("DeleteVolumeEnd"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
return &csi.DeleteVolumeResponse{}, nil
|
return &csi.DeleteVolumeResponse{}, nil
|
||||||
}
|
}
|
||||||
@ -179,7 +179,7 @@ func (s *service) ControllerPublishVolume(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("ControllerPublishVolumeStart"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("ControllerPublishVolumeStart"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.volsRWL.Lock()
|
s.volsRWL.Lock()
|
||||||
@ -246,7 +246,7 @@ func (s *service) ControllerPublishVolume(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("ControllerPublishVolumeEnd"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("ControllerPublishVolumeEnd"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &csi.ControllerPublishVolumeResponse{
|
return &csi.ControllerPublishVolumeResponse{
|
||||||
@ -280,7 +280,7 @@ func (s *service) ControllerUnpublishVolume(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeStart"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeStart"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.volsRWL.Lock()
|
s.volsRWL.Lock()
|
||||||
@ -309,7 +309,7 @@ func (s *service) ControllerUnpublishVolume(
|
|||||||
s.vols[i] = v
|
s.vols[i] = v
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeEnd"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeEnd"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &csi.ControllerUnpublishVolumeResponse{}, nil
|
return &csi.ControllerUnpublishVolumeResponse{}, nil
|
||||||
@ -332,7 +332,7 @@ func (s *service) ValidateVolumeCapabilities(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("ValidateVolumeCapabilities"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("ValidateVolumeCapabilities"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &csi.ValidateVolumeCapabilitiesResponse{
|
return &csi.ValidateVolumeCapabilitiesResponse{
|
||||||
@ -350,7 +350,7 @@ func (s *service) ControllerGetVolume(
|
|||||||
*csi.ControllerGetVolumeResponse, error) {
|
*csi.ControllerGetVolumeResponse, error) {
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("GetVolumeStart"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("GetVolumeStart"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := &csi.ControllerGetVolumeResponse{
|
resp := &csi.ControllerGetVolumeResponse{
|
||||||
@ -373,7 +373,7 @@ func (s *service) ControllerGetVolume(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("GetVolumeEnd"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("GetVolumeEnd"); hookVal != codes.OK {
|
||||||
return nil, status.Errorf(hookVal, hookMsg)
|
return nil, status.Error(hookVal, hookMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return resp, nil
|
return resp, nil
|
||||||
@ -385,7 +385,7 @@ func (s *service) ListVolumes(
|
|||||||
*csi.ListVolumesResponse, error) {
|
*csi.ListVolumesResponse, error) {
|
||||||
|
|
||||||
if hookVal, hookMsg := s.execHook("ListVolumesStart"); hookVal != codes.OK {
|
if hookVal, hookMsg := s.execHook("ListVolumesStart"); hookVal != codes.OK {
|
||||||
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

// Copy the mock volumes into a new slice in order to avoid
@@ -464,7 +464,7 @@ func (s *service) ListVolumes(
}

if hookVal, hookMsg := s.execHook("ListVolumesEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.ListVolumesResponse{
@@ -479,7 +479,7 @@ func (s *service) GetCapacity(
*csi.GetCapacityResponse, error) {

if hookVal, hookMsg := s.execHook("GetCapacity"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.GetCapacityResponse{
@@ -493,7 +493,7 @@ func (s *service) ControllerGetCapabilities(
*csi.ControllerGetCapabilitiesResponse, error) {

if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesStart"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

caps := []*csi.ControllerServiceCapability{
@@ -597,7 +597,7 @@ func (s *service) ControllerGetCapabilities(
}

if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.ControllerGetCapabilitiesResponse{
@@ -630,7 +630,7 @@ func (s *service) CreateSnapshot(ctx context.Context,
s.snapshots.Add(snapshot)

if hookVal, hookMsg := s.execHook("CreateSnapshotEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil
@@ -645,7 +645,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
}

if hookVal, hookMsg := s.execHook("DeleteSnapshotStart"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

// If the snapshot does not exist then return an idempotent response.
@@ -661,7 +661,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
klog.V(5).InfoS("mock delete snapshot", "snapshotId", req.SnapshotId)

if hookVal, hookMsg := s.execHook("DeleteSnapshotEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.DeleteSnapshotResponse{}, nil
@@ -671,7 +671,7 @@ func (s *service) ListSnapshots(ctx context.Context,
req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {

if hookVal, hookMsg := s.execHook("ListSnapshots"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

// case 1: SnapshotId is not empty, return snapshots that match the snapshot id.
@@ -700,7 +700,7 @@ func (s *service) ControllerExpandVolume(
}

if hookVal, hookMsg := s.execHook("ControllerExpandVolumeStart"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

s.volsRWL.Lock()
@@ -737,7 +737,7 @@ func (s *service) ControllerExpandVolume(
s.vols[i] = v

if hookVal, hookMsg := s.execHook("ControllerExpandVolumeEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return resp, nil

@@ -89,7 +89,7 @@ func (s *service) NodeStageVolume(
s.vols[i] = v

if hookVal, hookMsg := s.execHook("NodeStageVolumeEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.NodeStageVolumeResponse{}, nil
@@ -130,7 +130,7 @@ func (s *service) NodeUnstageVolume(
s.vols[i] = v

if hookVal, hookMsg := s.execHook("NodeUnstageVolumeEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}
return &csi.NodeUnstageVolumeResponse{}, nil
}
@@ -141,7 +141,7 @@ func (s *service) NodePublishVolume(
*csi.NodePublishVolumeResponse, error) {

if hookVal, hookMsg := s.execHook("NodePublishVolumeStart"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}
ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true"
device, ok := req.PublishContext["device"]
@@ -229,7 +229,7 @@ func (s *service) NodePublishVolume(
s.vols[i] = v
}
if hookVal, hookMsg := s.execHook("NodePublishVolumeEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.NodePublishVolumeResponse{}, nil
@@ -247,7 +247,7 @@ func (s *service) NodeUnpublishVolume(
return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty")
}
if hookVal, hookMsg := s.execHook("NodeUnpublishVolumeStart"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

s.volsRWL.Lock()
@@ -282,7 +282,7 @@ func (s *service) NodeUnpublishVolume(
s.vols[i] = v
}
if hookVal, hookMsg := s.execHook("NodeUnpublishVolumeEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return &csi.NodeUnpublishVolumeResponse{}, nil
@@ -296,7 +296,7 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum
return nil, status.Error(codes.InvalidArgument, "Volume Path cannot be empty")
}
if hookVal, hookMsg := s.execHook("NodeExpandVolumeStart"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

s.volsRWL.Lock()
@@ -323,7 +323,7 @@ func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolum
s.vols[i] = v
}
if hookVal, hookMsg := s.execHook("NodeExpandVolumeEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

return resp, nil
@@ -335,7 +335,7 @@ func (s *service) NodeGetCapabilities(
*csi.NodeGetCapabilitiesResponse, error) {

if hookVal, hookMsg := s.execHook("NodeGetCapabilities"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}
capabilities := []*csi.NodeServiceCapability{
{
@@ -395,7 +395,7 @@ func (s *service) NodeGetCapabilities(
func (s *service) NodeGetInfo(ctx context.Context,
req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
if hookVal, hookMsg := s.execHook("NodeGetInfo"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}
csiNodeResponse := &csi.NodeGetInfoResponse{
NodeId: s.nodeID,
@@ -442,11 +442,11 @@ func (s *service) NodeGetVolumeStats(ctx context.Context,
msg := fmt.Sprintf("volume %q doest not exist on the specified path %q", req.VolumeId, req.VolumePath)
resp.VolumeCondition.Abnormal = true
resp.VolumeCondition.Message = msg
-return resp, status.Errorf(codes.NotFound, msg)
+return resp, status.Error(codes.NotFound, msg)
}

if hookVal, hookMsg := s.execHook("NodeGetVolumeStatsEnd"); hookVal != codes.OK {
-return nil, status.Errorf(hookVal, hookMsg)
+return nil, status.Error(hookVal, hookMsg)
}

resp.Usage = []*csi.VolumeUsage{

@@ -762,7 +762,7 @@ func ensureTopologyRequirements(ctx context.Context, nodeSelection *e2epod.NodeS
nodes, err := e2enode.GetReadySchedulableNodes(ctx, cs)
framework.ExpectNoError(err)
if len(nodes.Items) < minCount {
-e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", minCount))
+e2eskipper.Skipf("Number of available nodes is less than %d - skipping", minCount)
}

topologyKeys := driverInfo.TopologyKeys

@@ -18,6 +18,7 @@ package testsuites

import (
"context"
+"errors"
"fmt"
"regexp"
"strings"
@@ -314,7 +315,7 @@ func cleanupTest(ctx context.Context, cs clientset.Interface, ns string, podName
cleanupErrors = append(cleanupErrors, fmt.Sprintf("timed out waiting for PVs to be deleted: %s", err))
}
if len(cleanupErrors) != 0 {
-return fmt.Errorf("test cleanup failed: " + strings.Join(cleanupErrors, "; "))
+return errors.New("test cleanup failed: " + strings.Join(cleanupErrors, "; "))
}
return nil
}

@@ -550,7 +550,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
-framework.Logf(err.Error())
+framework.Logf("%s", err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
@@ -589,7 +589,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
-framework.Logf(err.Error())
+framework.Logf("%s", err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))

@@ -19,6 +19,7 @@ package apps
import (
"context"
"encoding/json"
+"errors"
"fmt"
"io"
"net"
@@ -131,13 +132,14 @@ func (t *CassandraUpgradeTest) listUsers() ([]string, error) {
if err != nil {
return nil, err
}
-return nil, fmt.Errorf(string(b))
+return nil, errors.New(string(b))
}
var names []string
if err := json.NewDecoder(r.Body).Decode(&names); err != nil {
return nil, err
}
return names, nil

}

// addUser adds a user to the db via the tester services.
@@ -153,7 +155,7 @@ func (t *CassandraUpgradeTest) addUser(name string) error {
if err != nil {
return err
}
-return fmt.Errorf(string(b))
+return errors.New(string(b))
}
return nil
}

@@ -19,6 +19,7 @@ package apps
import (
"context"
"encoding/json"
+"errors"
"fmt"
"io"
"net"
@@ -125,7 +126,7 @@ func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
if err != nil {
return nil, err
}
-return nil, fmt.Errorf(string(b))
+return nil, errors.New(string(b))
}
var names []string
if err := json.NewDecoder(r.Body).Decode(&names); err != nil {
@@ -146,7 +147,7 @@ func (t *EtcdUpgradeTest) addUser(name string) error {
if err != nil {
return err
}
-return fmt.Errorf(string(b))
+return errors.New(string(b))
}
return nil
}

@@ -19,6 +19,7 @@ package apps
import (
"context"
"encoding/json"
+"errors"
"fmt"
"io"
"net"
@@ -194,7 +195,7 @@ func (t *MySQLUpgradeTest) addName(name string) error {
if err != nil {
return err
}
-return fmt.Errorf(string(b))
+return errors.New(string(b))
}
return nil
}
@@ -212,7 +213,7 @@ func (t *MySQLUpgradeTest) countNames() (int, error) {
if err != nil {
return 0, err
}
-return 0, fmt.Errorf(string(b))
+return 0, errors.New(string(b))
}
var count int
if err := json.NewDecoder(r.Body).Decode(&count); err != nil {

@@ -112,19 +112,19 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
ginkgo.By("deploying the GMSA webhook")
err := deployGmsaWebhook(ctx, f)
if err != nil {
-framework.Failf(err.Error())
+framework.Fail(err.Error())
}

ginkgo.By("creating the GMSA custom resource")
err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
if err != nil {
-framework.Failf(err.Error())
+framework.Fail(err.Error())
}

ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
rbacRoleName, err := createRBACRoleForGmsa(ctx, f)
if err != nil {
-framework.Failf(err.Error())
+framework.Fail(err.Error())
}

ginkgo.By("creating a service account")
@@ -179,19 +179,19 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
ginkgo.By("deploying the GMSA webhook")
err := deployGmsaWebhook(ctx, f)
if err != nil {
-framework.Failf(err.Error())
+framework.Fail(err.Error())
}

ginkgo.By("creating the GMSA custom resource")
err = createGmsaCustomResource(f.Namespace.Name, crdManifestContents)
if err != nil {
-framework.Failf(err.Error())
+framework.Fail(err.Error())
}

ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
rbacRoleName, err := createRBACRoleForGmsa(ctx, f)
if err != nil {
-framework.Failf(err.Error())
+framework.Fail(err.Error())
}

ginkgo.By("creating a service account")

@@ -18,6 +18,7 @@ package e2enode

import (
"context"
+"errors"
"fmt"
"os"
"time"
@@ -90,8 +91,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.Wit
return nil
}
msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
-framework.Logf(msg)
-return fmt.Errorf(msg)
+return errors.New(msg)
}, time.Minute*2, time.Second*4).Should(gomega.Succeed())

ginkgo.By("check if it's running all the time")
@@ -100,7 +100,7 @@ var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.Wit
if err == nil {
framework.Logf("mirror pod %q is running", mirrorPodName)
} else {
-framework.Logf(err.Error())
+framework.Logf("%s", err.Error())
}
return err
}, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred())

@@ -42,8 +42,8 @@ func requireSRIOVDevices() {

msg := "this test is meant to run on a system with at least one configured VF from SRIOV device"
if framework.TestContext.RequireDevices {
-framework.Failf(msg)
+framework.Fail(msg)
} else {
-e2eskipper.Skipf(msg)
+e2eskipper.Skip(msg)
}
}
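
All of the hunks above apply the same vet-driven cleanup: printf-style helpers (status.Errorf, fmt.Errorf, framework.Logf, framework.Failf, e2eskipper.Skipf) were being handed a single non-constant string, which recent Go toolchains flag through go vet's printf analyzer, so each call either switches to its non-formatting counterpart (status.Error, errors.New, framework.Fail, e2eskipper.Skip) or gains an explicit "%s" format. The snippet below is a minimal, hypothetical illustration of that rule, not code from this PR; the failf helper and its message are invented for the example.

package main

import (
	"errors"
	"fmt"
)

// failf demonstrates the pattern go vet's printf analyzer reports:
// a caller-supplied string used where a constant format string is expected.
func failf(msg string) error {
	// With a recent toolchain, `go vet` would warn on the commented-out line:
	//   non-constant format string in call to fmt.Errorf
	// return fmt.Errorf(msg)

	// Either fix preserves the message even if it contains '%' verbs:
	_ = fmt.Errorf("%s", msg) // keep formatting, make the format string constant
	return errors.New(msg)    // or drop formatting entirely
}

func main() {
	fmt.Println(failf("volume at 100% capacity"))
}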