cleanup: remove duplicate import
Signed-off-by: Abirdcfly <fp544037857@gmail.com>
parent 5d7fdf1f12
commit 00b9ead02c
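
The pattern cleaned up in every file below is the same: a package imported twice, once under an explicit name and once without, so two identifiers point at the same package. A minimal sketch of the consolidated form follows; the package name example and the variable DefaultGate are illustrative only, while the import path and the DefaultFeatureGate symbol are taken from the first hunk of the diff.

// Minimal sketch (hypothetical file, not part of this commit): after the
// cleanup only one import of the path remains and call sites use its name.
package example

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	// Before the cleanup the file also contained the duplicate
	//   "k8s.io/apiserver/pkg/util/feature"
	// and some call sites referred to it as feature.DefaultFeatureGate.
)

// DefaultGate exposes the feature gate through the single remaining import.
var DefaultGate = utilfeature.DefaultFeatureGate
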
@@ -30,7 +30,6 @@ import (
 "k8s.io/apiserver/pkg/features"
 genericapiserver "k8s.io/apiserver/pkg/server"
 genericoptions "k8s.io/apiserver/pkg/server/options"
-"k8s.io/apiserver/pkg/util/feature"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/apiserver/pkg/util/webhook"
 kubeexternalinformers "k8s.io/client-go/informers"
@@ -58,7 +57,7 @@ func createAPIExtensionsConfig(
 &genericConfig,
 externalInformers,
 genericConfig.LoopbackClientConfig,
-feature.DefaultFeatureGate,
+utilfeature.DefaultFeatureGate,
 pluginInitializers...)
 if err != nil {
 return nil, err

@@ -26,7 +26,6 @@ import (
 apps "k8s.io/api/apps/v1"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/util/version"
 versionutil "k8s.io/apimachinery/pkg/util/version"
 clientsetfake "k8s.io/client-go/kubernetes/fake"

@@ -98,11 +97,11 @@ func TestGetAvailableUpgrades(t *testing.T) {
 // variables are in the form v{MAJOR}{MINOR}{PATCH}, where MINOR is a variable so test are automatically uptodate to the latest MinimumControlPlaneVersion/

 // v1.X series, e.g. v1.14
-v1X0 := version.MustParseSemantic("v1.14.0")
+v1X0 := versionutil.MustParseSemantic("v1.14.0")
 v1X5 := v1X0.WithPatch(5)

 // v1.Y series, where Y = X+1, e.g. v1.15
-v1Y0 := version.MustParseSemantic("v1.15.0")
+v1Y0 := versionutil.MustParseSemantic("v1.15.0")
 v1Y0alpha0 := v1Y0.WithPreRelease("alpha.0")
 v1Y0alpha1 := v1Y0.WithPreRelease("alpha.1")
 v1Y1 := v1Y0.WithPatch(1)
@@ -111,7 +110,7 @@ func TestGetAvailableUpgrades(t *testing.T) {
 v1Y5 := v1Y0.WithPatch(5)

 // v1.Z series, where Z = Y+1, e.g. v1.16
-v1Z0 := version.MustParseSemantic("v1.16.0")
+v1Z0 := versionutil.MustParseSemantic("v1.16.0")
 v1Z0alpha1 := v1Z0.WithPreRelease("alpha.1")
 v1Z0alpha2 := v1Z0.WithPreRelease("alpha.2")
 v1Z0beta1 := v1Z0.WithPreRelease("beta.1")

@@ -39,7 +39,6 @@ import (
 netutil "k8s.io/apimachinery/pkg/util/net"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/validation"
-"k8s.io/apimachinery/pkg/util/version"
 versionutil "k8s.io/apimachinery/pkg/util/version"
 kubeadmversion "k8s.io/component-base/version"
 "k8s.io/klog/v2"
@@ -48,7 +47,6 @@ import (
 netutils "k8s.io/utils/net"

 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
-"k8s.io/kubernetes/cmd/kubeadm/app/constants"
 kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 "k8s.io/kubernetes/cmd/kubeadm/app/images"
 "k8s.io/kubernetes/cmd/kubeadm/app/util/initsystem"
@@ -598,7 +596,7 @@ func (kubever KubernetesVersionCheck) Check() (warnings, errorList []error) {
 // KubeletVersionCheck validates installed kubelet version
 type KubeletVersionCheck struct {
 KubernetesVersion string
-minKubeletVersion *version.Version
+minKubeletVersion *versionutil.Version
 exec utilsexec.Interface
 }

@@ -615,7 +613,7 @@ func (kubever KubeletVersionCheck) Check() (warnings, errorList []error) {
 return nil, []error{errors.Wrap(err, "couldn't get kubelet version")}
 }
 if kubever.minKubeletVersion == nil {
-kubever.minKubeletVersion = constants.MinimumKubeletVersion
+kubever.minKubeletVersion = kubeadmconstants.MinimumKubeletVersion
 }
 if kubeletVersion.LessThan(kubever.minKubeletVersion) {
 return nil, []error{errors.Errorf("Kubelet version %q is lower than kubeadm can support. Please upgrade kubelet", kubeletVersion)}

@@ -26,7 +26,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 errorsutil "k8s.io/apimachinery/pkg/util/errors"

-"k8s.io/kubernetes/cmd/kubeadm/app/constants"
 kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
 "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
 )
@@ -124,9 +123,9 @@ func (w *Waiter) SetTimeout(_ time.Duration) {}
 // WaitForStaticPodControlPlaneHashes returns an empty hash for all control plane images;
 func (w *Waiter) WaitForStaticPodControlPlaneHashes(_ string) (map[string]string, error) {
 return map[string]string{
-constants.KubeAPIServer: "",
-constants.KubeControllerManager: "",
-constants.KubeScheduler: "",
+kubeadmconstants.KubeAPIServer: "",
+kubeadmconstants.KubeControllerManager: "",
+kubeadmconstants.KubeScheduler: "",
 }, nil
 }

@@ -45,7 +45,6 @@ import (
 schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
 apiservice "k8s.io/kubernetes/pkg/api/service"
 "k8s.io/kubernetes/pkg/apis/core"
-api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/apis/core/helper"
 podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
 corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
@@ -4715,7 +4714,7 @@ func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bo
 return allErrs
 }

-func needsExternalTrafficPolicy(svc *api.Service) bool {
+func needsExternalTrafficPolicy(svc *core.Service) bool {
 return svc.Spec.Type == core.ServiceTypeLoadBalancer || svc.Spec.Type == core.ServiceTypeNodePort
 }

@@ -4760,7 +4759,7 @@ func validateServiceExternalTrafficPolicy(service *core.Service) field.ErrorList
 return allErrs
 }

-func validateServiceExternalTrafficFieldsUpdate(before, after *api.Service) field.ErrorList {
+func validateServiceExternalTrafficFieldsUpdate(before, after *core.Service) field.ErrorList {
 allErrs := field.ErrorList{}

 if apiservice.NeedsHealthCheck(before) && apiservice.NeedsHealthCheck(after) {

@@ -27,7 +27,6 @@ import (

 "github.com/google/go-cmp/cmp"
 "github.com/stretchr/testify/assert"
-asserttestify "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "google.golang.org/protobuf/proto"
 v1 "k8s.io/api/core/v1"
@@ -43,7 +42,6 @@ import (
 "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/capabilities"
 "k8s.io/kubernetes/pkg/features"
-"k8s.io/utils/pointer"
 utilpointer "k8s.io/utils/pointer"
 )

@@ -18944,7 +18942,7 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
 MaxSkew: 1,
 TopologyKey: "k8s.io/zone",
 WhenUnsatisfiable: core.DoNotSchedule,
-MinDomains: pointer.Int32(3),
+MinDomains: utilpointer.Int32(3),
 },
 },
 wantFieldErrors: field.ErrorList{},
@@ -18982,10 +18980,10 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
 MaxSkew: 1,
 TopologyKey: "k8s.io/zone",
 WhenUnsatisfiable: core.DoNotSchedule,
-MinDomains: pointer.Int32(-1),
+MinDomains: utilpointer.Int32(-1),
 },
 },
-wantFieldErrors: []*field.Error{field.Invalid(fieldPathMinDomains, pointer.Int32(-1), isNotPositiveErrorMsg)},
+wantFieldErrors: []*field.Error{field.Invalid(fieldPathMinDomains, utilpointer.Int32(-1), isNotPositiveErrorMsg)},
 },
 {
 name: "cannot use non-nil MinDomains with ScheduleAnyway",
@@ -18994,10 +18992,10 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
 MaxSkew: 1,
 TopologyKey: "k8s.io/zone",
 WhenUnsatisfiable: core.ScheduleAnyway,
-MinDomains: pointer.Int32(10),
+MinDomains: utilpointer.Int32(10),
 },
 },
-wantFieldErrors: []*field.Error{field.Invalid(fieldPathMinDomains, pointer.Int32(10), fmt.Sprintf("can only use minDomains if whenUnsatisfiable=%s, not %s", string(core.DoNotSchedule), string(core.ScheduleAnyway)))},
+wantFieldErrors: []*field.Error{field.Invalid(fieldPathMinDomains, utilpointer.Int32(10), fmt.Sprintf("can only use minDomains if whenUnsatisfiable=%s, not %s", string(core.DoNotSchedule), string(core.ScheduleAnyway)))},
 },
 {
 name: "use negative MinDomains with ScheduleAnyway(invalid)",
@@ -19006,12 +19004,12 @@ func TestValidateTopologySpreadConstraints(t *testing.T) {
 MaxSkew: 1,
 TopologyKey: "k8s.io/zone",
 WhenUnsatisfiable: core.ScheduleAnyway,
-MinDomains: pointer.Int32(-1),
+MinDomains: utilpointer.Int32(-1),
 },
 },
 wantFieldErrors: []*field.Error{
-field.Invalid(fieldPathMinDomains, pointer.Int32(-1), isNotPositiveErrorMsg),
-field.Invalid(fieldPathMinDomains, pointer.Int32(-1), fmt.Sprintf("can only use minDomains if whenUnsatisfiable=%s, not %s", string(core.DoNotSchedule), string(core.ScheduleAnyway))),
+field.Invalid(fieldPathMinDomains, utilpointer.Int32(-1), isNotPositiveErrorMsg),
+field.Invalid(fieldPathMinDomains, utilpointer.Int32(-1), fmt.Sprintf("can only use minDomains if whenUnsatisfiable=%s, not %s", string(core.DoNotSchedule), string(core.ScheduleAnyway))),
 },
 },
 {
@@ -19668,7 +19666,7 @@ func TestValidateSeccompAnnotationsAndFieldsMatch(t *testing.T) {

 for i, test := range tests {
 err := validateSeccompAnnotationsAndFieldsMatch(test.annotationValue, test.seccompField, test.fldPath)
-asserttestify.Equal(t, test.expectedErr, err, "TestCase[%d]: %s", i, test.description)
+assert.Equal(t, test.expectedErr, err, "TestCase[%d]: %s", i, test.description)
 }
 }

@@ -19797,7 +19795,7 @@ func TestValidatePodTemplateSpecSeccomp(t *testing.T) {

 for i, test := range tests {
 err := ValidatePodTemplateSpec(test.spec, rootFld, PodValidationOptions{})
-asserttestify.Equal(t, test.expectedErr, err, "TestCase[%d]: %s", i, test.description)
+assert.Equal(t, test.expectedErr, err, "TestCase[%d]: %s", i, test.description)
 }
 }

@@ -23,7 +23,6 @@ import (
 discovery "k8s.io/api/discovery/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/kubernetes/pkg/controller/util/endpoint"
-endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
 utilpointer "k8s.io/utils/pointer"
 )

@@ -33,8 +32,8 @@ func TestNumEndpointsAndSlices(t *testing.T) {
 p80 := int32(80)
 p443 := int32(443)

-pmKey80443 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
-pmKey80 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})
+pmKey80443 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
+pmKey80 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})

 spCacheEfficient := NewServicePortCache()
 spCacheEfficient.Set(pmKey80, EfficiencyInfo{Endpoints: 45, Slices: 1})
@@ -66,8 +65,8 @@ func TestPlaceHolderSlice(t *testing.T) {
 p80 := int32(80)
 p443 := int32(443)

-pmKey80443 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
-pmKey80 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})
+pmKey80443 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
+pmKey80 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})

 sp := NewServicePortCache()
 sp.Set(pmKey80, EfficiencyInfo{Endpoints: 0, Slices: 1})
@@ -95,7 +94,7 @@ func benchmarkUpdateServicePortCache(b *testing.B, num int) {
 ns := "benchmark"
 httpKey := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: utilpointer.Int32Ptr(80)}})
 httpsKey := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: utilpointer.Int32Ptr(443)}})
-spCache := &ServicePortCache{items: map[endpointutil.PortMapKey]EfficiencyInfo{
+spCache := &ServicePortCache{items: map[endpoint.PortMapKey]EfficiencyInfo{
 httpKey: {
 Endpoints: 182,
 Slices: 2,

@@ -20,8 +20,7 @@ import (
 "fmt"
 "time"

-corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 discovery "k8s.io/api/discovery/v1"
 apiequality "k8s.io/apimachinery/pkg/api/equality"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,7 +32,7 @@ import (
 "k8s.io/klog/v2"
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 api "k8s.io/kubernetes/pkg/apis/core"
-helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+"k8s.io/kubernetes/pkg/apis/core/v1/helper"
 "k8s.io/kubernetes/pkg/apis/discovery/validation"
 endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
 "k8s.io/kubernetes/pkg/features"
@@ -41,7 +40,7 @@ import (
 )

 // podToEndpoint returns an Endpoint object generated from a Pod, a Node, and a Service for a particular addressType.
-func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service, addressType discovery.AddressType) discovery.Endpoint {
+func podToEndpoint(pod *v1.Pod, node *v1.Node, service *v1.Service, addressType discovery.AddressType) discovery.Endpoint {
 serving := podutil.IsPodReady(pod)
 terminating := pod.DeletionTimestamp != nil
 // For compatibility reasons, "ready" should never be "true" if a pod is terminatng, unless
@@ -52,7 +51,7 @@ func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service,
 Conditions: discovery.EndpointConditions{
 Ready: &ready,
 },
-TargetRef: &corev1.ObjectReference{
+TargetRef: &v1.ObjectReference{
 Kind: "Pod",
 Namespace: pod.ObjectMeta.Namespace,
 Name: pod.ObjectMeta.Name,
@@ -69,8 +68,8 @@ func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service,
 ep.NodeName = &pod.Spec.NodeName
 }

-if node != nil && node.Labels[corev1.LabelTopologyZone] != "" {
-zone := node.Labels[corev1.LabelTopologyZone]
+if node != nil && node.Labels[v1.LabelTopologyZone] != "" {
+zone := node.Labels[v1.LabelTopologyZone]
 ep.Zone = &zone
 }

@@ -83,7 +82,7 @@ func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service,

 // getEndpointPorts returns a list of EndpointPorts generated from a Service
 // and Pod.
-func getEndpointPorts(service *corev1.Service, pod *corev1.Pod) []discovery.EndpointPort {
+func getEndpointPorts(service *v1.Service, pod *v1.Pod) []discovery.EndpointPort {
 endpointPorts := []discovery.EndpointPort{}

 // Allow headless service not to have ports.
@@ -115,7 +114,7 @@ func getEndpointPorts(service *corev1.Service, pod *corev1.Pod) []discovery.Endp
 }

 // getEndpointAddresses returns a list of addresses generated from a pod status.
-func getEndpointAddresses(podStatus corev1.PodStatus, service *corev1.Service, addressType discovery.AddressType) []string {
+func getEndpointAddresses(podStatus v1.PodStatus, service *v1.Service, addressType discovery.AddressType) []string {
 addresses := []string{}

 for _, podIP := range podStatus.PodIPs {
@@ -134,7 +133,7 @@ func getEndpointAddresses(podStatus corev1.PodStatus, service *corev1.Service, a

 // newEndpointSlice returns an EndpointSlice generated from a service and
 // endpointMeta.
-func newEndpointSlice(service *corev1.Service, endpointMeta *endpointMeta) *discovery.EndpointSlice {
+func newEndpointSlice(service *v1.Service, endpointMeta *endpointMeta) *discovery.EndpointSlice {
 gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
 ownerRef := metav1.NewControllerRef(service, gvk)
 epSlice := &discovery.EndpointSlice{
@@ -166,7 +165,7 @@ func getEndpointSlicePrefix(serviceName string) string {

 // ownedBy returns true if the provided EndpointSlice is owned by the provided
 // Service.
-func ownedBy(endpointSlice *discovery.EndpointSlice, svc *corev1.Service) bool {
+func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
 for _, o := range endpointSlice.OwnerReferences {
 if o.UID == svc.UID && o.Kind == "Service" && o.APIVersion == "v1" {
 return true
@@ -222,9 +221,9 @@ func addTriggerTimeAnnotation(endpointSlice *discovery.EndpointSlice, triggerTim
 }

 if !triggerTime.IsZero() {
-endpointSlice.Annotations[corev1.EndpointsLastChangeTriggerTime] = triggerTime.UTC().Format(time.RFC3339Nano)
+endpointSlice.Annotations[v1.EndpointsLastChangeTriggerTime] = triggerTime.UTC().Format(time.RFC3339Nano)
 } else { // No new trigger time, clear the annotation.
-delete(endpointSlice.Annotations, corev1.EndpointsLastChangeTriggerTime)
+delete(endpointSlice.Annotations, v1.EndpointsLastChangeTriggerTime)
 }
 }

@@ -244,7 +243,7 @@ func serviceControllerKey(endpointSlice *discovery.EndpointSlice) (string, error
 // setEndpointSliceLabels returns a map with the new endpoint slices labels and true if there was an update.
 // Slices labels must be equivalent to the Service labels except for the reserved IsHeadlessService, LabelServiceName and LabelManagedBy labels
 // Changes to IsHeadlessService, LabelServiceName and LabelManagedBy labels on the Service do not result in updates to EndpointSlice labels.
-func setEndpointSliceLabels(epSlice *discovery.EndpointSlice, service *corev1.Service) (map[string]string, bool) {
+func setEndpointSliceLabels(epSlice *discovery.EndpointSlice, service *v1.Service) (map[string]string, bool) {
 updated := false
 epLabels := make(map[string]string)
 svcLabels := make(map[string]string)
@@ -308,7 +307,7 @@ func (sl endpointSliceEndpointLen) Less(i, j int) bool {
 }

 // returns a map of address types used by a service
-func getAddressTypesForService(service *corev1.Service) map[discovery.AddressType]struct{} {
+func getAddressTypesForService(service *v1.Service) map[discovery.AddressType]struct{} {
 serviceSupportedAddresses := make(map[discovery.AddressType]struct{})
 // TODO: (khenidak) when address types are removed in favor of
 // v1.IPFamily this will need to be removed, and work directly with
@@ -317,11 +316,11 @@ func getAddressTypesForService(service *corev1.Service) map[discovery.AddressTyp
 // IMPORTANT: we assume that IP of (discovery.AddressType enum) is never in use
 // as it gets deprecated
 for _, family := range service.Spec.IPFamilies {
-if family == corev1.IPv4Protocol {
+if family == v1.IPv4Protocol {
 serviceSupportedAddresses[discovery.AddressTypeIPv4] = struct{}{}
 }

-if family == corev1.IPv6Protocol {
+if family == v1.IPv6Protocol {
 serviceSupportedAddresses[discovery.AddressTypeIPv6] = struct{}{}
 }
 }
@@ -345,7 +344,7 @@ func getAddressTypesForService(service *corev1.Service) map[discovery.AddressTyp
 // this ensures that traffic is not disrupted until then. But *may*
 // include undesired families for headless services until then.

-if len(service.Spec.ClusterIP) > 0 && service.Spec.ClusterIP != corev1.ClusterIPNone { // headfull
+if len(service.Spec.ClusterIP) > 0 && service.Spec.ClusterIP != v1.ClusterIPNone { // headfull
 addrType := discovery.AddressTypeIPv4
 if utilnet.IsIPv6String(service.Spec.ClusterIP) {
 addrType = discovery.AddressTypeIPv6
@@ -385,9 +384,9 @@ func unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete []*discovery
 }

 // hintsEnabled returns true if the provided annotations include a
-// corev1.AnnotationTopologyAwareHints key with a value set to "Auto" or "auto".
+// v1.AnnotationTopologyAwareHints key with a value set to "Auto" or "auto".
 func hintsEnabled(annotations map[string]string) bool {
-val, ok := annotations[corev1.AnnotationTopologyAwareHints]
+val, ok := annotations[v1.AnnotationTopologyAwareHints]
 if !ok {
 return false
 }

@@ -21,8 +21,7 @@ import (
 "testing"

 "github.com/stretchr/testify/assert"
-corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
+"k8s.io/api/core/v1"
 discovery "k8s.io/api/discovery/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
@@ -54,12 +53,12 @@ func TestNewEndpointSlice(t *testing.T) {

 testCases := []struct {
 name string
-tweakEndpoint func(ep *corev1.Endpoints)
+tweakEndpoint func(ep *v1.Endpoints)
 expectedSlice discovery.EndpointSlice
 }{
 {
 name: "create slice from endpoints",
-tweakEndpoint: func(ep *corev1.Endpoints) {
+tweakEndpoint: func(ep *v1.Endpoints) {
 },
 expectedSlice: discovery.EndpointSlice{
 ObjectMeta: metav1.ObjectMeta{
@@ -79,7 +78,7 @@ func TestNewEndpointSlice(t *testing.T) {
 },
 {
 name: "create slice from endpoints with annotations",
-tweakEndpoint: func(ep *corev1.Endpoints) {
+tweakEndpoint: func(ep *v1.Endpoints) {
 annotations := map[string]string{"foo": "bar"}
 ep.Annotations = annotations
 },
@@ -101,7 +100,7 @@ func TestNewEndpointSlice(t *testing.T) {
 },
 {
 name: "create slice from endpoints with labels",
-tweakEndpoint: func(ep *corev1.Endpoints) {
+tweakEndpoint: func(ep *v1.Endpoints) {
 labels := map[string]string{"foo": "bar"}
 ep.Labels = labels
 },
@@ -124,7 +123,7 @@ func TestNewEndpointSlice(t *testing.T) {
 },
 {
 name: "create slice from endpoints with labels and annotations",
-tweakEndpoint: func(ep *corev1.Endpoints) {
+tweakEndpoint: func(ep *v1.Endpoints) {
 labels := map[string]string{"foo": "bar"}
 ep.Labels = labels
 annotations := map[string]string{"foo2": "bar2"}
@@ -149,12 +148,12 @@ func TestNewEndpointSlice(t *testing.T) {
 },
 {
 name: "create slice from endpoints with labels and annotations triggertime",
-tweakEndpoint: func(ep *corev1.Endpoints) {
+tweakEndpoint: func(ep *v1.Endpoints) {
 labels := map[string]string{"foo": "bar"}
 ep.Labels = labels
 annotations := map[string]string{
 "foo2": "bar2",
-corev1.EndpointsLastChangeTriggerTime: "date",
+v1.EndpointsLastChangeTriggerTime: "date",
 }
 ep.Annotations = annotations
 },

@@ -35,7 +35,7 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/discovery"
-clientset "k8s.io/client-go/kubernetes"
+clientset "k8s.io/client-go/kubernetes" // import known versions
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 "k8s.io/client-go/metadata"
 "k8s.io/client-go/tools/cache"
@@ -47,9 +47,6 @@ import (
 c "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/apis/config/scheme"
 "k8s.io/kubernetes/pkg/controller/garbagecollector/metrics"
-
-// import known versions
-_ "k8s.io/client-go/kubernetes"
 )

 // ResourceResyncTime defines the resync period of the garbage collector's informers.

@@ -28,7 +28,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/util/intstr"
-intstrutil "k8s.io/apimachinery/pkg/util/intstr"
 "k8s.io/apimachinery/pkg/util/strategicpatch"
 "k8s.io/client-go/kubernetes/scheme"
 "k8s.io/klog/v2"
@@ -592,7 +591,7 @@ func (ao ascendingOrdinal) Less(i, j int) bool {
 // Note that API validation has already guaranteed the maxUnavailable field to be >1 if it is an integer
 // or 0% < value <= 100% if it is a percentage, so we don't have to consider other cases.
 func getStatefulSetMaxUnavailable(maxUnavailable *intstr.IntOrString, replicaCount int) (int, error) {
-maxUnavailableNum, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(1)), replicaCount, false)
+maxUnavailableNum, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(maxUnavailable, intstr.FromInt(1)), replicaCount, false)
 if err != nil {
 return 0, err
 }

@@ -34,7 +34,6 @@ import (
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 corelisters "k8s.io/client-go/listers/core/v1"
 "k8s.io/client-go/tools/cache"
-kcache "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/record"
 "k8s.io/client-go/util/workqueue"
 "k8s.io/component-helpers/storage/ephemeral"
@@ -57,13 +56,13 @@ type ephemeralController struct {
 // objects from the API server. It is shared with other controllers and
 // therefore the PVC objects in its store should be treated as immutable.
 pvcLister corelisters.PersistentVolumeClaimLister
-pvcsSynced kcache.InformerSynced
+pvcsSynced cache.InformerSynced

 // podLister is the shared Pod lister used to fetch Pod
 // objects from the API server. It is shared with other controllers and
 // therefore the Pod objects in its store should be treated as immutable.
 podLister corelisters.PodLister
-podSynced kcache.InformerSynced
+podSynced cache.InformerSynced

 // podIndexer has the common PodPVC indexer indexer installed To
 // limit iteration over pods to those of interest.
@@ -98,7 +97,7 @@ func NewController(
 eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ephemeral_volume"})

-podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
+podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: ec.enqueuePod,
 // The pod spec is immutable. Therefore the controller can ignore pod updates
 // because there cannot be any changes that have to be copied into the generated
@@ -106,7 +105,7 @@ func NewController(
 // Deletion of the PVC is handled through the owner reference and garbage collection.
 // Therefore pod deletions also can be ignored.
 })
-pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
+pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 DeleteFunc: ec.onPVCDelete,
 })
 if err := common.AddPodPVCIndexerIfNotPresent(ec.podIndexer); err != nil {
@@ -130,7 +129,7 @@ func (ec *ephemeralController) enqueuePod(obj interface{}) {
 for _, vol := range pod.Spec.Volumes {
 if vol.Ephemeral != nil {
 // It has at least one ephemeral inline volume, work on it.
-key, err := kcache.DeletionHandlingMetaNamespaceKeyFunc(pod)
+key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod)
 if err != nil {
 runtime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", pod, err))
 return
@@ -208,7 +207,7 @@ func (ec *ephemeralController) processNextWorkItem(ctx context.Context) bool {
 // syncHandler is invoked for each pod which might need to be processed.
 // If an error is returned from this function, the pod will be requeued.
 func (ec *ephemeralController) syncHandler(ctx context.Context, key string) error {
-namespace, name, err := kcache.SplitMetaNamespaceKey(key)
+namespace, name, err := cache.SplitMetaNamespaceKey(key)
 if err != nil {
 return err
 }

@@ -33,7 +33,6 @@ import (
 "k8s.io/client-go/kubernetes/fake"
 k8stesting "k8s.io/client-go/testing"
 "k8s.io/client-go/tools/cache"
-kcache "k8s.io/client-go/tools/cache"
 "k8s.io/component-base/metrics/testutil"
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/pkg/controller"
@@ -213,7 +212,7 @@ func makePod(name, namespace string, uid types.UID, volumes ...v1.Volume) *v1.Po
 }

 func podKey(pod *v1.Pod) string {
-key, _ := kcache.DeletionHandlingMetaNamespaceKeyFunc(testPodWithEphemeral)
+key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(testPodWithEphemeral)
 return key
 }

@@ -40,7 +40,6 @@ import (
 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 corelisters "k8s.io/client-go/listers/core/v1"
 "k8s.io/client-go/tools/cache"
-kcache "k8s.io/client-go/tools/cache"
 "k8s.io/client-go/tools/record"
 "k8s.io/client-go/util/workqueue"
 cloudprovider "k8s.io/cloud-provider"
@@ -80,10 +79,10 @@ type expandController struct {
 // objects from the API server. It is shared with other controllers and
 // therefore the PVC objects in its store should be treated as immutable.
 pvcLister corelisters.PersistentVolumeClaimLister
-pvcsSynced kcache.InformerSynced
+pvcsSynced cache.InformerSynced

 pvLister corelisters.PersistentVolumeLister
-pvSynced kcache.InformerSynced
+pvSynced cache.InformerSynced

 // cloud provider used by volume host
 cloud cloudprovider.Interface
@@ -145,7 +144,7 @@ func NewExpandController(
 expc.recorder,
 blkutil)

-pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
+pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 AddFunc: expc.enqueuePVC,
 UpdateFunc: func(old, new interface{}) {
 oldPVC, ok := old.(*v1.PersistentVolumeClaim)
@@ -181,7 +180,7 @@ func (expc *expandController) enqueuePVC(obj interface{}) {
 }

 if pvc.Status.Phase == v1.ClaimBound {
-key, err := kcache.DeletionHandlingMetaNamespaceKeyFunc(pvc)
+key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pvc)
 if err != nil {
 runtime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", pvc, err))
 return
@@ -212,7 +211,7 @@ func (expc *expandController) processNextWorkItem(ctx context.Context) bool {
 // syncHandler performs actual expansion of volume. If an error is returned
 // from this function - PVC will be requeued for resizing.
 func (expc *expandController) syncHandler(ctx context.Context, key string) error {
-namespace, name, err := kcache.SplitMetaNamespaceKey(key)
+namespace, name, err := cache.SplitMetaNamespaceKey(key)
 if err != nil {
 return err
 }

@@ -45,7 +45,6 @@ import (
 "k8s.io/kubernetes/pkg/controller"
 pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
 "k8s.io/kubernetes/pkg/volume"
-vol "k8s.io/kubernetes/pkg/volume"
 "k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 )

@@ -226,7 +225,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
 params := ControllerParameters{
 KubeClient: kubeClient,
 SyncPeriod: 5 * time.Second,
-VolumePlugins: []vol.VolumePlugin{},
+VolumePlugins: []volume.VolumePlugin{},
 VolumeInformer: informerFactory.Core().V1().PersistentVolumes(),
 ClaimInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
 ClassInformer: informerFactory.Storage().V1().StorageClasses(),
@@ -614,7 +613,7 @@ func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error,
 deleteCalls: expectedDeleteCalls,
 provisionCalls: expectedProvisionCalls,
 }
-ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, nil /* prober */, ctrl)
+ctrl.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plugin}, nil /* prober */, ctrl)
 return toWrap(ctrl, reactor, test)
 }
 }
@@ -657,7 +656,7 @@ func (t fakeCSIMigratedPluginManager) IsMigrationEnabledForPlugin(pluginName str
 func wrapTestWithCSIMigrationProvisionCalls(toWrap testCall) testCall {
 plugin := &mockVolumePlugin{}
 return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
-ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, nil /* prober */, ctrl)
+ctrl.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plugin}, nil /* prober */, ctrl)
 ctrl.translator = fakeCSINameTranslator{}
 ctrl.csiMigratedPluginManager = fakeCSIMigratedPluginManager{}
 return toWrap(ctrl, reactor, test)
@@ -924,7 +923,7 @@ type mockVolumePlugin struct {
 deleteCallCounter int
 recycleCalls []error
 recycleCallCounter int
-provisionOptions vol.VolumeOptions
+provisionOptions volume.VolumeOptions
 }

 type provisionCall struct {
@@ -932,12 +931,12 @@ type provisionCall struct {
 ret error
 }

-var _ vol.VolumePlugin = &mockVolumePlugin{}
-var _ vol.RecyclableVolumePlugin = &mockVolumePlugin{}
-var _ vol.DeletableVolumePlugin = &mockVolumePlugin{}
-var _ vol.ProvisionableVolumePlugin = &mockVolumePlugin{}
+var _ volume.VolumePlugin = &mockVolumePlugin{}
+var _ volume.RecyclableVolumePlugin = &mockVolumePlugin{}
+var _ volume.DeletableVolumePlugin = &mockVolumePlugin{}
+var _ volume.ProvisionableVolumePlugin = &mockVolumePlugin{}

-func (plugin *mockVolumePlugin) Init(host vol.VolumeHost) error {
+func (plugin *mockVolumePlugin) Init(host volume.VolumeHost) error {
 return nil
 }

@@ -945,11 +944,11 @@ func (plugin *mockVolumePlugin) GetPluginName() string {
 return mockPluginName
 }

-func (plugin *mockVolumePlugin) GetVolumeName(spec *vol.Spec) (string, error) {
+func (plugin *mockVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
 return spec.Name(), nil
 }

-func (plugin *mockVolumePlugin) CanSupport(spec *vol.Spec) bool {
+func (plugin *mockVolumePlugin) CanSupport(spec *volume.Spec) bool {
 return true
 }

@@ -965,21 +964,21 @@ func (plugin *mockVolumePlugin) SupportsBulkVolumeVerification() bool {
 return false
 }

-func (plugin *mockVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol.Spec, error) {
+func (plugin *mockVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
 return nil, nil
 }

-func (plugin *mockVolumePlugin) NewMounter(spec *vol.Spec, podRef *v1.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
+func (plugin *mockVolumePlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 return nil, fmt.Errorf("Mounter is not supported by this plugin")
 }

-func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol.Unmounter, error) {
+func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (volume.Unmounter, error) {
 return nil, fmt.Errorf("Unmounter is not supported by this plugin")
 }

 // Provisioner interfaces

-func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) {
+func (plugin *mockVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
 if len(plugin.provisionCalls) > 0 {
 // mockVolumePlugin directly implements Provisioner interface
 klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
@@ -1033,7 +1032,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi

 // Deleter interfaces

-func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) {
+func (plugin *mockVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
 if len(plugin.deleteCalls) > 0 {
 // mockVolumePlugin directly implements Deleter interface
 klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
@@ -1059,13 +1058,13 @@ func (plugin *mockVolumePlugin) GetPath() string {
 return ""
 }

-func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) {
+func (plugin *mockVolumePlugin) GetMetrics() (*volume.Metrics, error) {
 return nil, nil
 }

 // Recycler interfaces

-func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
+func (plugin *mockVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
 if len(plugin.recycleCalls) == 0 {
 return fmt.Errorf("Mock plugin error: no recycleCalls configured")
 }

@@ -47,7 +47,6 @@ import (
 genericapiserver "k8s.io/apiserver/pkg/server"
 "k8s.io/apiserver/pkg/server/options"
 "k8s.io/apiserver/pkg/server/resourceconfig"
-"k8s.io/apiserver/pkg/server/storage"
 serverstorage "k8s.io/apiserver/pkg/server/storage"
 etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
 "k8s.io/client-go/discovery"
@@ -156,7 +155,7 @@ func TestLegacyRestStorageStrategies(t *testing.T) {
 LoopbackClientConfig: apiserverCfg.GenericConfig.LoopbackClientConfig,
 }

-_, apiGroupInfo, err := storageProvider.NewLegacyRESTStorage(storage.NewResourceConfig(), apiserverCfg.GenericConfig.RESTOptionsGetter)
+_, apiGroupInfo, err := storageProvider.NewLegacyRESTStorage(serverstorage.NewResourceConfig(), apiserverCfg.GenericConfig.RESTOptionsGetter)
 if err != nil {
 t.Errorf("failed to create legacy REST storage: %v", err)
 }

|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
@ -30,7 +30,6 @@ import (
|
|||||||
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
|
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
|
||||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||||
kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
|
||||||
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
|
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
|
||||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||||
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
|
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
|
||||||
@ -564,7 +563,7 @@ func TestCadvisorListPodStatsWhenContainerLogFound(t *testing.T) {
|
|||||||
kuberuntime.BuildContainerLogsDirectory(prf0.Namespace, prf0.Name, types.UID(prf0.UID), cName01): containerLogStats1,
|
kuberuntime.BuildContainerLogsDirectory(prf0.Namespace, prf0.Name, types.UID(prf0.UID), cName01): containerLogStats1,
|
||||||
}
|
}
|
||||||
fakeStatsSlice := []*volume.Metrics{containerLogStats0, containerLogStats1}
|
fakeStatsSlice := []*volume.Metrics{containerLogStats0, containerLogStats1}
|
||||||
fakeOS := &kubecontainertest.FakeOS{}
|
fakeOS := &containertest.FakeOS{}
|
||||||
|
|
||||||
freeRootfsInodes := rootfsInodesFree
|
freeRootfsInodes := rootfsInodesFree
|
||||||
totalRootfsInodes := rootfsInodes
|
totalRootfsInodes := rootfsInodes
|
||||||
@ -29,12 +29,11 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/apis/core"
-api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)

-func testVolumeClaim(name string, namespace string, spec api.PersistentVolumeClaimSpec) *api.PersistentVolumeClaim {
+func testVolumeClaim(name string, namespace string, spec core.PersistentVolumeClaimSpec) *core.PersistentVolumeClaim {
-return &api.PersistentVolumeClaim{
+return &core.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
Spec: spec,
}
@ -42,7 +41,7 @@ func testVolumeClaim(name string, namespace string, spec api.PersistentVolumeCla

func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
classGold := "gold"
-validClaim := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
+validClaim := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
@ -51,17 +50,17 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
},
},
},
-AccessModes: []api.PersistentVolumeAccessMode{
+AccessModes: []core.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+core.ReadWriteOnce,
-api.ReadOnlyMany,
+core.ReadOnlyMany,
},
-Resources: api.ResourceRequirements{
+Resources: core.ResourceRequirements{
-Requests: api.ResourceList{
+Requests: core.ResourceList{
-api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+core.ResourceName(core.ResourceStorage): resource.MustParse("10Gi"),
},
},
})
-validClaimByStorageClass := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
+validClaimByStorageClass := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
Selector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
@ -70,27 +69,27 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
},
},
},
-AccessModes: []api.PersistentVolumeAccessMode{
+AccessModes: []core.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+core.ReadWriteOnce,
-api.ReadOnlyMany,
+core.ReadOnlyMany,
},
-Resources: api.ResourceRequirements{
+Resources: core.ResourceRequirements{
-Requests: api.ResourceList{
+Requests: core.ResourceList{
-api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
+core.ResourceName(core.ResourceStorage): resource.MustParse("10Gi"),
},
},
StorageClassName: &classGold,
})

validClaimWithNonIntegerStorage := validClaim.DeepCopy()
-validClaimWithNonIntegerStorage.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] = resource.MustParse("1001m")
+validClaimWithNonIntegerStorage.Spec.Resources.Requests[core.ResourceName(core.ResourceStorage)] = resource.MustParse("1001m")

validClaimByStorageClassWithNonIntegerStorage := validClaimByStorageClass.DeepCopy()
-validClaimByStorageClassWithNonIntegerStorage.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] = resource.MustParse("1001m")
+validClaimByStorageClassWithNonIntegerStorage.Spec.Resources.Requests[core.ResourceName(core.ResourceStorage)] = resource.MustParse("1001m")

evaluator := NewPersistentVolumeClaimEvaluator(nil)
testCases := map[string]struct {
-pvc *api.PersistentVolumeClaim
+pvc *core.PersistentVolumeClaim
usage corev1.ResourceList
enableRecoverFromExpansion bool
}{
@ -169,16 +168,16 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
}
}

-func getPVCWithAllocatedResource(pvcSize, allocatedSize string) *api.PersistentVolumeClaim {
+func getPVCWithAllocatedResource(pvcSize, allocatedSize string) *core.PersistentVolumeClaim {
-validPVCWithAllocatedResources := testVolumeClaim("foo", "ns", api.PersistentVolumeClaimSpec{
+validPVCWithAllocatedResources := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
-Resources: api.ResourceRequirements{
+Resources: core.ResourceRequirements{
-Requests: api.ResourceList{
+Requests: core.ResourceList{
core.ResourceStorage: resource.MustParse(pvcSize),
},
},
})
-validPVCWithAllocatedResources.Status.AllocatedResources = api.ResourceList{
+validPVCWithAllocatedResources.Status.AllocatedResources = core.ResourceList{
-api.ResourceName(api.ResourceStorage): resource.MustParse(allocatedSize),
+core.ResourceName(core.ResourceStorage): resource.MustParse(allocatedSize),
}
return validPVCWithAllocatedResources
}
@ -26,7 +26,6 @@ import (
"testing"

apiequality "k8s.io/apimachinery/pkg/api/equality"
-"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -280,7 +279,7 @@ func TestScaleUpdate(t *testing.T) {
update.ResourceVersion = deployment.ResourceVersion
update.Spec.Replicas = 15

-if _, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil && !errors.IsConflict(err) {
+if _, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil && !apierrors.IsConflict(err) {
t.Fatalf("unexpected error, expecting an update conflict but got %v", err)
}
}
@ -427,7 +426,7 @@ func TestEtcdCreateDeploymentRollbackNoDeployment(t *testing.T) {
if err == nil {
t.Fatalf("Expected not-found-error but got nothing")
}
-if !errors.IsNotFound(storeerr.InterpretGetError(err, apps.Resource("deployments"), name)) {
+if !apierrors.IsNotFound(storeerr.InterpretGetError(err, apps.Resource("deployments"), name)) {
t.Fatalf("Unexpected error returned: %#v", err)
}

@ -435,7 +434,7 @@ func TestEtcdCreateDeploymentRollbackNoDeployment(t *testing.T) {
if err == nil {
t.Fatalf("Expected not-found-error but got nothing")
}
-if !errors.IsNotFound(storeerr.InterpretGetError(err, apps.Resource("deployments"), name)) {
+if !apierrors.IsNotFound(storeerr.InterpretGetError(err, apps.Resource("deployments"), name)) {
t.Fatalf("Unexpected error: %v", err)
}
}
@ -23,7 +23,6 @@ import (
"testing"

apiequality "k8s.io/apimachinery/pkg/api/equality"
-"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -342,7 +341,7 @@ func TestScaleUpdate(t *testing.T) {
update.ResourceVersion = rs.ResourceVersion
update.Spec.Replicas = 15

-if _, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil && !errors.IsConflict(err) {
+if _, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil && !apierrors.IsConflict(err) {
t.Fatalf("unexpected error, expecting an update conflict but got %v", err)
}
}
@ -23,7 +23,6 @@ import (
"testing"

apiequality "k8s.io/apimachinery/pkg/api/equality"
-"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -292,7 +291,7 @@ func TestScaleUpdate(t *testing.T) {
update.ResourceVersion = sts.ResourceVersion
update.Spec.Replicas = 15

-if _, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil && !errors.IsConflict(err) {
+if _, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil && !apierrors.IsConflict(err) {
t.Fatalf("unexpected error, expecting an update conflict but got %v", err)
}
}
@ -23,7 +23,6 @@ import (
"testing"

corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
api "k8s.io/kubernetes/pkg/apis/core"
@ -148,7 +147,7 @@ func TestRepairWithExisting(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.1.1",
ClusterIPs: []string{"192.168.1.1"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
},
},
&corev1.Service{
@ -156,7 +155,7 @@ func TestRepairWithExisting(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.1.100",
ClusterIPs: []string{"192.168.1.100"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
},
},
&corev1.Service{ // outside CIDR, will be dropped
@ -164,7 +163,7 @@ func TestRepairWithExisting(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.0.1",
ClusterIPs: []string{"192.168.0.1"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
},
},
&corev1.Service{ // empty, ignored
@ -179,7 +178,7 @@ func TestRepairWithExisting(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.1.1",
ClusterIPs: []string{"192.168.1.1"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
},
},
&corev1.Service{ // headless
@ -250,31 +249,31 @@ func makeIPNet(cidr string) *net.IPNet {
func TestShouldWorkOnSecondary(t *testing.T) {
testCases := []struct {
name string
-expectedFamilies []v1.IPFamily
+expectedFamilies []corev1.IPFamily
primaryNet *net.IPNet
secondaryNet *net.IPNet
}{
{
name: "primary only (v4)",
-expectedFamilies: []v1.IPFamily{v1.IPv4Protocol},
+expectedFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
primaryNet: makeIPNet("10.0.0.0/16"),
secondaryNet: nil,
},
{
name: "primary only (v6)",
-expectedFamilies: []v1.IPFamily{v1.IPv6Protocol},
+expectedFamilies: []corev1.IPFamily{corev1.IPv6Protocol},
primaryNet: makeIPNet("2000::/120"),
secondaryNet: nil,
},
{
name: "primary and secondary provided (v4,v6)",
-expectedFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+expectedFamilies: []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
primaryNet: makeIPNet("10.0.0.0/16"),
secondaryNet: makeIPNet("2000::/120"),
},
{
name: "primary and secondary provided (v6,v4)",
-expectedFamilies: []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+expectedFamilies: []corev1.IPFamily{corev1.IPv6Protocol, corev1.IPv4Protocol},
primaryNet: makeIPNet("2000::/120"),
secondaryNet: makeIPNet("10.0.0.0/16"),
},
@ -295,7 +294,7 @@ func TestShouldWorkOnSecondary(t *testing.T) {
t.Fatalf("expected to have allocator by family count:%v got %v", len(tc.expectedFamilies), len(repair.allocatorByFamily))
}

-seen := make(map[v1.IPFamily]bool)
+seen := make(map[corev1.IPFamily]bool)
for _, family := range tc.expectedFamilies {
familySeen := true

@ -488,7 +487,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.1.1",
ClusterIPs: []string{"192.168.1.1", "2000::1"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
},
},
&corev1.Service{
@ -496,7 +495,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "2000::1",
ClusterIPs: []string{"2000::1", "192.168.1.100"},
-IPFamilies: []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv6Protocol, corev1.IPv4Protocol},
},
},
&corev1.Service{
@ -504,7 +503,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "2000::2",
ClusterIPs: []string{"2000::2"},
-IPFamilies: []v1.IPFamily{v1.IPv6Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv6Protocol},
},
},
&corev1.Service{
@ -512,7 +511,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.1.90",
ClusterIPs: []string{"192.168.1.90"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
},
},
// outside CIDR, will be dropped
@ -521,7 +520,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.0.1",
ClusterIPs: []string{"192.168.0.1"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
},
},
&corev1.Service{ // outside CIDR, will be dropped
@ -529,7 +528,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "3000::1",
ClusterIPs: []string{"3000::1"},
-IPFamilies: []v1.IPFamily{v1.IPv6Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv6Protocol},
},
},
&corev1.Service{
@ -537,7 +536,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.0.1",
ClusterIPs: []string{"192.168.0.1", "3000::1"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
},
},
&corev1.Service{
@ -545,7 +544,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "3000::1",
ClusterIPs: []string{"3000::1", "192.168.0.1"},
-IPFamilies: []v1.IPFamily{v1.IPv6Protocol, v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv6Protocol, corev1.IPv4Protocol},
},
},

@ -558,7 +557,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "192.168.1.1",
ClusterIPs: []string{"192.168.1.1"},
-IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv4Protocol},
},
},
&corev1.Service{ // duplicate, dropped
@ -566,7 +565,7 @@ func TestRepairWithExistingDualStack(t *testing.T) {
Spec: corev1.ServiceSpec{
ClusterIP: "2000::2",
ClusterIPs: []string{"2000::2"},
-IPFamilies: []v1.IPFamily{v1.IPv6Protocol},
+IPFamilies: []corev1.IPFamily{corev1.IPv6Protocol},
},
},

@ -24,7 +24,6 @@ import (
"k8s.io/klog/v2"

rbacapiv1 "k8s.io/api/rbac/v1"
-"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -153,7 +152,7 @@ type PolicyData struct {
}

func isConflictOrServiceUnavailable(err error) bool {
-return errors.IsConflict(err) || errors.IsServiceUnavailable(err)
+return apierrors.IsConflict(err) || apierrors.IsServiceUnavailable(err)
}

func retryOnConflictOrServiceUnavailable(backoff wait.Backoff, fn func() error) error {
@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
-"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@ -317,7 +316,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
},
},
}
-p, err := NewFit(&args, fh, feature.Features{})
+p, err := NewFit(&args, fh, plfeature.Features{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -540,7 +539,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
},
}

-p, err := NewFit(&args, fh, feature.Features{})
+p, err := NewFit(&args, fh, plfeature.Features{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -24,16 +24,14 @@ import (
"os"
"path"
"path/filepath"
+"reflect"
"testing"
"time"

-"reflect"
-
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"

authenticationv1 "k8s.io/api/authentication/v1"
-api "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -91,7 +89,7 @@ func TestMounterGetPath(t *testing.T) {
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mounter, err := plug.NewMounter(
spec,
-&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
+&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
@ -186,9 +184,9 @@ func TestMounterSetUp(t *testing.T) {

mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
-&api.Pod{
+&corev1.Pod{
ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
-Spec: api.PodSpec{
+Spec: corev1.PodSpec{
ServiceAccountName: testAccount,
},
},
@ -328,7 +326,7 @@ func TestMounterSetUpSimple(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mounter, err := plug.NewMounter(
tc.spec(tc.fsType, tc.options),
-&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
+&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
volume.VolumeOptions{},
)
if tc.shouldFail && err != nil {
@ -445,7 +443,7 @@ func TestMounterSetupWithStatusTracking(t *testing.T) {
podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
spec: func(fsType string, options []string) *volume.Spec {
pv := makeTestPV("pv5", 20, testDriver, "vol6")
-pv.Spec.PersistentVolumeSource.CSI.NodePublishSecretRef = &api.SecretReference{
+pv.Spec.PersistentVolumeSource.CSI.NodePublishSecretRef = &corev1.SecretReference{
Name: "foo",
Namespace: "default",
}
@ -462,7 +460,7 @@ func TestMounterSetupWithStatusTracking(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mounter, err := plug.NewMounter(
tc.spec("ext4", []string{}),
-&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
+&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
@ -565,7 +563,7 @@ func TestMounterSetUpWithInline(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
mounter, err := plug.NewMounter(
tc.spec(tc.fsType, tc.options),
-&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
+&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
volume.VolumeOptions{},
)
if tc.shouldFail && err != nil {
@ -650,7 +648,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {

testCases := []struct {
name string
-accessModes []api.PersistentVolumeAccessMode
+accessModes []corev1.PersistentVolumeAccessMode
readOnly bool
fsType string
setFsGroup bool
@ -663,16 +661,16 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
}{
{
name: "default fstype, with no fsgroup (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
readOnly: false,
fsType: "",
},
{
name: "default fstype with fsgroup (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
readOnly: false,
fsType: "",
@ -681,9 +679,9 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteMany,
+corev1.ReadWriteMany,
-api.ReadOnlyMany,
+corev1.ReadOnlyMany,
},
fsType: "ext4",
setFsGroup: true,
@ -691,8 +689,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
readOnly: true,
fsType: "ext4",
@ -701,8 +699,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWO provided (should apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
fsType: "ext4",
setFsGroup: true,
@ -710,8 +708,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWO provided, FSGroupPolicy ReadWriteOnceWithFSType (should apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
fsType: "ext4",
setFsGroup: true,
@ -721,8 +719,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "default fstype with no fsgroup, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
readOnly: false,
fsType: "",
@ -731,8 +729,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "default fstype with fsgroup, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
readOnly: false,
fsType: "",
@ -743,8 +741,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWO provided, readonly, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
readOnly: true,
fsType: "ext4",
@ -755,8 +753,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWX provided, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteMany,
+corev1.ReadWriteMany,
},
readOnly: false,
fsType: "ext4",
@ -767,8 +765,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWO provided, FSGroupPolicy None (should not apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
fsType: "ext4",
setFsGroup: true,
@ -778,8 +776,8 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {
},
{
name: "fstype, fsgroup, RWO provided, readOnly, FSGroupPolicy File (should apply fsgroup)",
-accessModes: []api.PersistentVolumeAccessMode{
+accessModes: []corev1.PersistentVolumeAccessMode{
-api.ReadWriteOnce,
+corev1.ReadWriteOnce,
},
readOnly: true,
fsType: "ext4",
@ -844,7 +842,7 @@ func TestMounterSetUpWithFSGroup(t *testing.T) {

mounter, err := plug.NewMounter(
spec,
-&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
+&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
@ -1063,9 +1061,9 @@ func TestPodServiceAccountTokenAttrs(t *testing.T) {
defer os.RemoveAll(tmpDir)
mounter, err := plug.NewMounter(
volume.NewSpecFromVolume(makeTestVol("test", testDriver)),
-&api.Pod{
+&corev1.Pod{
ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
-Spec: api.PodSpec{
+Spec: corev1.PodSpec{
ServiceAccountName: testAccount,
},
},
@ -1107,7 +1105,7 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
readOnly bool
supportsSELinux bool
spec *volume.Spec
-pod *api.Pod
+pod *corev1.Pod
podUID types.UID
publishContext map[string]string
kubeVolHost volume.KubeletVolumeHost
@ -1177,9 +1175,9 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
},
fields: fields{
-spec: volume.NewSpecFromPersistentVolume(&api.PersistentVolume{
+spec: volume.NewSpecFromPersistentVolume(&corev1.PersistentVolume{
-Spec: api.PersistentVolumeSpec{
+Spec: corev1.PersistentVolumeSpec{
-AccessModes: []api.PersistentVolumeAccessMode{},
+AccessModes: []corev1.PersistentVolumeAccessMode{},
},
}, true),
},
@ -1193,9 +1191,9 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
},
fields: fields{
-spec: volume.NewSpecFromPersistentVolume(&api.PersistentVolume{
+spec: volume.NewSpecFromPersistentVolume(&corev1.PersistentVolume{
-Spec: api.PersistentVolumeSpec{
+Spec: corev1.PersistentVolumeSpec{
-AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
+AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
},
}, true),
},
@ -1209,9 +1207,9 @@ func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
},
fields: fields{
-spec: volume.NewSpecFromVolume(&api.Volume{
+spec: volume.NewSpecFromVolume(&corev1.Volume{
-VolumeSource: api.VolumeSource{
+VolumeSource: corev1.VolumeSource{
-CSI: &api.CSIVolumeSource{
+CSI: &corev1.CSIVolumeSource{
Driver: testDriver,
},
},
@ -35,7 +35,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
-"k8s.io/kubernetes/pkg/volume/util"
ioutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
@ -224,7 +223,7 @@ func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*v
// leave the global mount still mounted, while marking the volume as unused.
// The volume can then be mounted on several nodes, resulting in volume
// corruption.
-paths, err := util.GetReliableMountRefs(mounter, mountPath)
+paths, err := ioutil.GetReliableMountRefs(mounter, mountPath)
if io.IsInconsistentReadError(err) {
klog.Errorf("Failed to read mount refs from /proc/mounts for %s: %s", mountPath, err)
klog.Errorf("Kubelet cannot unmount volume at %s, please unmount it and all mounts of the same device manually.", mountPath)
@ -37,7 +37,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
-. "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@ -150,9 +149,9 @@ func makeFakeOutput(output string, rc int) testingexec.FakeAction {
}
}

-func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
+func ProbeVolumePlugins(config volume.VolumeConfig) []volume.VolumePlugin {
if _, ok := config.OtherAttributes["fake-property"]; ok {
-return []VolumePlugin{
+return []volume.VolumePlugin{
&FakeVolumePlugin{
PluginName: "fake-plugin",
Host: nil,
@ -160,7 +159,7 @@ func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
},
}
}
-return []VolumePlugin{&FakeVolumePlugin{PluginName: "fake-plugin"}}
+return []volume.VolumePlugin{&FakeVolumePlugin{PluginName: "fake-plugin"}}
}

// FakeVolumePlugin is useful for testing. It tries to be a fully compliant
@ -170,9 +169,9 @@ func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
type FakeVolumePlugin struct {
sync.RWMutex
PluginName string
-Host VolumeHost
+Host volume.VolumeHost
-Config VolumeConfig
+Config volume.VolumeConfig
-LastProvisionerOptions VolumeOptions
+LastProvisionerOptions volume.VolumeOptions
NewAttacherCallCount int
NewDetacherCallCount int
NodeExpandCallCount int
@ -187,7 +186,7 @@ type FakeVolumePlugin struct {
NonAttachable bool

// Add callbacks as needed
-WaitForAttachHook func(spec *Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error)
+WaitForAttachHook func(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error)
UnmountDeviceHook func(globalMountPath string) error

Mounters []*FakeVolume
@ -198,15 +197,15 @@ type FakeVolumePlugin struct {
BlockVolumeUnmappers []*FakeVolume
}

-var _ VolumePlugin = &FakeVolumePlugin{}
+var _ volume.VolumePlugin = &FakeVolumePlugin{}
-var _ BlockVolumePlugin = &FakeVolumePlugin{}
+var _ volume.BlockVolumePlugin = &FakeVolumePlugin{}
-var _ RecyclableVolumePlugin = &FakeVolumePlugin{}
+var _ volume.RecyclableVolumePlugin = &FakeVolumePlugin{}
-var _ DeletableVolumePlugin = &FakeVolumePlugin{}
+var _ volume.DeletableVolumePlugin = &FakeVolumePlugin{}
-var _ ProvisionableVolumePlugin = &FakeVolumePlugin{}
+var _ volume.ProvisionableVolumePlugin = &FakeVolumePlugin{}
-var _ AttachableVolumePlugin = &FakeVolumePlugin{}
+var _ volume.AttachableVolumePlugin = &FakeVolumePlugin{}
-var _ VolumePluginWithAttachLimits = &FakeVolumePlugin{}
+var _ volume.VolumePluginWithAttachLimits = &FakeVolumePlugin{}
-var _ DeviceMountableVolumePlugin = &FakeVolumePlugin{}
+var _ volume.DeviceMountableVolumePlugin = &FakeVolumePlugin{}
-var _ NodeExpandableVolumePlugin = &FakeVolumePlugin{}
+var _ volume.NodeExpandableVolumePlugin = &FakeVolumePlugin{}

func (plugin *FakeVolumePlugin) getFakeVolume(list *[]*FakeVolume) *FakeVolume {
if list != nil {
@ -233,7 +232,7 @@ func (plugin *FakeVolumePlugin) getFakeVolume(list *[]*FakeVolume) *FakeVolume {
return volume
}

-func (plugin *FakeVolumePlugin) Init(host VolumeHost) error {
+func (plugin *FakeVolumePlugin) Init(host volume.VolumeHost) error {
plugin.Lock()
defer plugin.Unlock()
plugin.Host = host
@ -246,7 +245,7 @@ func (plugin *FakeVolumePlugin) GetPluginName() string {
return plugin.PluginName
}

-func (plugin *FakeVolumePlugin) GetVolumeName(spec *Spec) (string, error) {
+func (plugin *FakeVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
var volumeName string
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
volumeName = spec.Volume.GCEPersistentDisk.PDName
@ -262,7 +261,7 @@ func (plugin *FakeVolumePlugin) GetVolumeName(spec *Spec) (string, error) {
return volumeName, nil
}

-func (plugin *FakeVolumePlugin) CanSupport(spec *Spec) bool {
+func (plugin *FakeVolumePlugin) CanSupport(spec *volume.Spec) bool {
// TODO: maybe pattern-match on spec.Name() to decide?
return true
}
@ -279,17 +278,17 @@ func (plugin *FakeVolumePlugin) SupportsBulkVolumeVerification() bool {
return false
}

-func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
+func (plugin *FakeVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
plugin.Lock()
defer plugin.Unlock()
-volume := plugin.getFakeVolume(&plugin.Mounters)
+fakeVolume := plugin.getFakeVolume(&plugin.Mounters)
-volume.Lock()
+fakeVolume.Lock()
-defer volume.Unlock()
+defer fakeVolume.Unlock()
-volume.PodUID = pod.UID
+fakeVolume.PodUID = pod.UID
-volume.VolName = spec.Name()
+fakeVolume.VolName = spec.Name()
-volume.Plugin = plugin
+fakeVolume.Plugin = plugin
-volume.MetricsNil = MetricsNil{}
+fakeVolume.MetricsNil = volume.MetricsNil{}
-return volume, nil
+return fakeVolume, nil
}

func (plugin *FakeVolumePlugin) GetMounters() (Mounters []*FakeVolume) {
@ -298,17 +297,17 @@ func (plugin *FakeVolumePlugin) GetMounters() (Mounters []*FakeVolume) {
return plugin.Mounters
}

-func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (Unmounter, error) {
+func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
plugin.Lock()
defer plugin.Unlock()
-volume := plugin.getFakeVolume(&plugin.Unmounters)
+fakeVolume := plugin.getFakeVolume(&plugin.Unmounters)
-volume.Lock()
+fakeVolume.Lock()
-defer volume.Unlock()
+defer fakeVolume.Unlock()
-volume.PodUID = podUID
+fakeVolume.PodUID = podUID
-volume.VolName = volName
+fakeVolume.VolName = volName
-volume.Plugin = plugin
+fakeVolume.Plugin = plugin
-volume.MetricsNil = MetricsNil{}
+fakeVolume.MetricsNil = volume.MetricsNil{}
-return volume, nil
+return fakeVolume, nil
}

func (plugin *FakeVolumePlugin) GetUnmounters() (Unmounters []*FakeVolume) {
@ -318,7 +317,7 @@ func (plugin *FakeVolumePlugin) GetUnmounters() (Unmounters []*FakeVolume) {
}

// Block volume support
-func (plugin *FakeVolumePlugin) NewBlockVolumeMapper(spec *Spec, pod *v1.Pod, opts VolumeOptions) (BlockVolumeMapper, error) {
+func (plugin *FakeVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
plugin.Lock()
defer plugin.Unlock()
volume := plugin.getFakeVolume(&plugin.BlockVolumeMappers)
@ -340,7 +339,7 @@ func (plugin *FakeVolumePlugin) GetBlockVolumeMapper() (BlockVolumeMappers []*Fa
}

// Block volume support
-func (plugin *FakeVolumePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (BlockVolumeUnmapper, error) {
+func (plugin *FakeVolumePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
plugin.Lock()
defer plugin.Unlock()
volume := plugin.getFakeVolume(&plugin.BlockVolumeUnmappers)
@ -359,14 +358,14 @@ func (plugin *FakeVolumePlugin) GetBlockVolumeUnmapper() (BlockVolumeUnmappers [
return plugin.BlockVolumeUnmappers
}

-func (plugin *FakeVolumePlugin) NewAttacher() (Attacher, error) {
+func (plugin *FakeVolumePlugin) NewAttacher() (volume.Attacher, error) {
plugin.Lock()
defer plugin.Unlock()
plugin.NewAttacherCallCount = plugin.NewAttacherCallCount + 1
return plugin.getFakeVolume(&plugin.Attachers), nil
}

-func (plugin *FakeVolumePlugin) NewDeviceMounter() (DeviceMounter, error) {
+func (plugin *FakeVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}

@ -382,7 +381,7 @@ func (plugin *FakeVolumePlugin) GetNewAttacherCallCount() int {
return plugin.NewAttacherCallCount
}

-func (plugin *FakeVolumePlugin) NewDetacher() (Detacher, error) {
+func (plugin *FakeVolumePlugin) NewDetacher() (volume.Detacher, error) {
plugin.Lock()
defer plugin.Unlock()
plugin.NewDetacherCallCount = plugin.NewDetacherCallCount + 1
@ -398,7 +397,7 @@ func (plugin *FakeVolumePlugin) NewDetacher() (Detacher, error) {
return detacher, nil
}

-func (plugin *FakeVolumePlugin) NewDeviceUnmounter() (DeviceUnmounter, error) {
+func (plugin *FakeVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}

@ -414,23 +413,23 @@ func (plugin *FakeVolumePlugin) GetNewDetacherCallCount() int {
return plugin.NewDetacherCallCount
}

-func (plugin *FakeVolumePlugin) CanAttach(spec *Spec) (bool, error) {
+func (plugin *FakeVolumePlugin) CanAttach(spec *volume.Spec) (bool, error) {
return !plugin.NonAttachable, nil
}

-func (plugin *FakeVolumePlugin) CanDeviceMount(spec *Spec) (bool, error) {
+func (plugin *FakeVolumePlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
return true, nil
}

-func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
+func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
return nil
}

-func (plugin *FakeVolumePlugin) NewDeleter(spec *Spec) (Deleter, error) {
+func (plugin *FakeVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
-return &FakeDeleter{"/attributesTransferredFromSpec", MetricsNil{}}, nil
+return &FakeDeleter{"/attributesTransferredFromSpec", volume.MetricsNil{}}, nil
}

-func (plugin *FakeVolumePlugin) NewProvisioner(options VolumeOptions) (Provisioner, error) {
+func (plugin *FakeVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
plugin.Lock()
defer plugin.Unlock()
plugin.LastProvisionerOptions = options
@ -441,8 +440,8 @@ func (plugin *FakeVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode
return []v1.PersistentVolumeAccessMode{}
}

-func (plugin *FakeVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) {
+func (plugin *FakeVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
-return &Spec{
+return &volume.Spec{
Volume: &v1.Volume{
Name: volumeName,
},
@ -450,8 +449,8 @@ func (plugin *FakeVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string
}

// Block volume support
-func (plugin *FakeVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*Spec, error) {
+func (plugin *FakeVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*volume.Spec, error) {
-return &Spec{
+return &volume.Spec{
Volume: &v1.Volume{
Name: volumeName,
},
@ -463,7 +462,7 @@ func (plugin *FakeVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]st
}

// Expandable volume support
-func (plugin *FakeVolumePlugin) ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
+func (plugin *FakeVolumePlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, nil
}

@ -471,7 +470,7 @@ func (plugin *FakeVolumePlugin) RequiresFSResize() bool {
return !plugin.DisableNodeExpansion
}

-func (plugin *FakeVolumePlugin) NodeExpand(resizeOptions NodeResizeOptions) (bool, error) {
+func (plugin *FakeVolumePlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
plugin.NodeExpandCallCount++
if resizeOptions.VolumeSpec.Name() == FailWithInUseVolumeName {
return false, volumetypes.NewFailedPreconditionError("volume-in-use")
@ -490,7 +489,7 @@ func (plugin *FakeVolumePlugin) GetVolumeLimits() (map[string]int64, error) {
return plugin.VolumeLimits, plugin.VolumeLimitsError
}

-func (plugin *FakeVolumePlugin) VolumeLimitKey(spec *Spec) string {
+func (plugin *FakeVolumePlugin) VolumeLimitKey(spec *volume.Spec) string {
return plugin.LimitKey
}

@ -505,30 +504,30 @@ func (f *FakeBasicVolumePlugin) GetPluginName() string {
return f.Plugin.GetPluginName()
}

-func (f *FakeBasicVolumePlugin) GetVolumeName(spec *Spec) (string, error) {
+func (f *FakeBasicVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
return f.Plugin.GetVolumeName(spec)
}

// CanSupport tests whether the plugin supports a given volume specification by
|
||||||
// testing volume spec name begins with plugin name or not.
|
// testing volume spec name begins with plugin name or not.
|
||||||
// This is useful to choose plugin by volume in testing.
|
// This is useful to choose plugin by volume in testing.
|
||||||
func (f *FakeBasicVolumePlugin) CanSupport(spec *Spec) bool {
|
func (f *FakeBasicVolumePlugin) CanSupport(spec *volume.Spec) bool {
|
||||||
return strings.HasPrefix(spec.Name(), f.GetPluginName())
|
return strings.HasPrefix(spec.Name(), f.GetPluginName())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeBasicVolumePlugin) ConstructVolumeSpec(ame, mountPath string) (*Spec, error) {
|
func (f *FakeBasicVolumePlugin) ConstructVolumeSpec(ame, mountPath string) (*volume.Spec, error) {
|
||||||
return f.Plugin.ConstructVolumeSpec(ame, mountPath)
|
return f.Plugin.ConstructVolumeSpec(ame, mountPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeBasicVolumePlugin) Init(ost VolumeHost) error {
|
func (f *FakeBasicVolumePlugin) Init(ost volume.VolumeHost) error {
|
||||||
return f.Plugin.Init(ost)
|
return f.Plugin.Init(ost)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeBasicVolumePlugin) NewMounter(spec *Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
|
func (f *FakeBasicVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
|
||||||
return f.Plugin.NewMounter(spec, pod, opts)
|
return f.Plugin.NewMounter(spec, pod, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeBasicVolumePlugin) NewUnmounter(volName string, podUID types.UID) (Unmounter, error) {
|
func (f *FakeBasicVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
|
||||||
return f.Plugin.NewUnmounter(volName, podUID)
|
return f.Plugin.NewUnmounter(volName, podUID)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -544,22 +543,22 @@ func (f *FakeBasicVolumePlugin) SupportsMountOption() bool {
|
|||||||
return f.Plugin.SupportsMountOption()
|
return f.Plugin.SupportsMountOption()
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ VolumePlugin = &FakeBasicVolumePlugin{}
|
var _ volume.VolumePlugin = &FakeBasicVolumePlugin{}
|
||||||
|
|
||||||
// FakeDeviceMountableVolumePlugin implements an device mountable plugin based on FakeBasicVolumePlugin.
|
// FakeDeviceMountableVolumePlugin implements an device mountable plugin based on FakeBasicVolumePlugin.
|
||||||
type FakeDeviceMountableVolumePlugin struct {
|
type FakeDeviceMountableVolumePlugin struct {
|
||||||
FakeBasicVolumePlugin
|
FakeBasicVolumePlugin
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeDeviceMountableVolumePlugin) CanDeviceMount(spec *Spec) (bool, error) {
|
func (f *FakeDeviceMountableVolumePlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeDeviceMountableVolumePlugin) NewDeviceMounter() (DeviceMounter, error) {
|
func (f *FakeDeviceMountableVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
|
||||||
return f.Plugin.NewDeviceMounter()
|
return f.Plugin.NewDeviceMounter()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeDeviceMountableVolumePlugin) NewDeviceUnmounter() (DeviceUnmounter, error) {
|
func (f *FakeDeviceMountableVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
|
||||||
return f.Plugin.NewDeviceUnmounter()
|
return f.Plugin.NewDeviceUnmounter()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -567,33 +566,33 @@ func (f *FakeDeviceMountableVolumePlugin) GetDeviceMountRefs(deviceMountPath str
|
|||||||
return f.Plugin.GetDeviceMountRefs(deviceMountPath)
|
return f.Plugin.GetDeviceMountRefs(deviceMountPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ VolumePlugin = &FakeDeviceMountableVolumePlugin{}
|
var _ volume.VolumePlugin = &FakeDeviceMountableVolumePlugin{}
|
||||||
var _ DeviceMountableVolumePlugin = &FakeDeviceMountableVolumePlugin{}
|
var _ volume.DeviceMountableVolumePlugin = &FakeDeviceMountableVolumePlugin{}
|
||||||
|
|
||||||
// FakeAttachableVolumePlugin implements an attachable plugin based on FakeDeviceMountableVolumePlugin.
|
// FakeAttachableVolumePlugin implements an attachable plugin based on FakeDeviceMountableVolumePlugin.
|
||||||
type FakeAttachableVolumePlugin struct {
|
type FakeAttachableVolumePlugin struct {
|
||||||
FakeDeviceMountableVolumePlugin
|
FakeDeviceMountableVolumePlugin
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeAttachableVolumePlugin) NewAttacher() (Attacher, error) {
|
func (f *FakeAttachableVolumePlugin) NewAttacher() (volume.Attacher, error) {
|
||||||
return f.Plugin.NewAttacher()
|
return f.Plugin.NewAttacher()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeAttachableVolumePlugin) NewDetacher() (Detacher, error) {
|
func (f *FakeAttachableVolumePlugin) NewDetacher() (volume.Detacher, error) {
|
||||||
return f.Plugin.NewDetacher()
|
return f.Plugin.NewDetacher()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeAttachableVolumePlugin) CanAttach(spec *Spec) (bool, error) {
|
func (f *FakeAttachableVolumePlugin) CanAttach(spec *volume.Spec) (bool, error) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ VolumePlugin = &FakeAttachableVolumePlugin{}
|
var _ volume.VolumePlugin = &FakeAttachableVolumePlugin{}
|
||||||
var _ AttachableVolumePlugin = &FakeAttachableVolumePlugin{}
|
var _ volume.AttachableVolumePlugin = &FakeAttachableVolumePlugin{}
|
||||||
|
|
||||||
type FakeFileVolumePlugin struct {
|
type FakeFileVolumePlugin struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plugin *FakeFileVolumePlugin) Init(host VolumeHost) error {
|
func (plugin *FakeFileVolumePlugin) Init(host volume.VolumeHost) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -601,11 +600,11 @@ func (plugin *FakeFileVolumePlugin) GetPluginName() string {
|
|||||||
return "fake-file-plugin"
|
return "fake-file-plugin"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plugin *FakeFileVolumePlugin) GetVolumeName(spec *Spec) (string, error) {
|
func (plugin *FakeFileVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plugin *FakeFileVolumePlugin) CanSupport(spec *Spec) bool {
|
func (plugin *FakeFileVolumePlugin) CanSupport(spec *volume.Spec) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -621,20 +620,20 @@ func (plugin *FakeFileVolumePlugin) SupportsBulkVolumeVerification() bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plugin *FakeFileVolumePlugin) NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error) {
|
func (plugin *FakeFileVolumePlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plugin *FakeFileVolumePlugin) NewUnmounter(name string, podUID types.UID) (Unmounter, error) {
|
func (plugin *FakeFileVolumePlugin) NewUnmounter(name string, podUID types.UID) (volume.Unmounter, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plugin *FakeFileVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) {
|
func (plugin *FakeFileVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewFakeFileVolumePlugin() []VolumePlugin {
|
func NewFakeFileVolumePlugin() []volume.VolumePlugin {
|
||||||
return []VolumePlugin{&FakeFileVolumePlugin{}}
|
return []volume.VolumePlugin{&FakeFileVolumePlugin{}}
|
||||||
}
|
}
|
||||||
|
|
||||||
type FakeVolume struct {
|
type FakeVolume struct {
|
||||||
@ -642,13 +641,13 @@ type FakeVolume struct {
|
|||||||
PodUID types.UID
|
PodUID types.UID
|
||||||
VolName string
|
VolName string
|
||||||
Plugin *FakeVolumePlugin
|
Plugin *FakeVolumePlugin
|
||||||
MetricsNil
|
volume.MetricsNil
|
||||||
VolumesAttached map[string]sets.String
|
VolumesAttached map[string]sets.String
|
||||||
DeviceMountState map[string]string
|
DeviceMountState map[string]string
|
||||||
VolumeMountState map[string]string
|
VolumeMountState map[string]string
|
||||||
|
|
||||||
// Add callbacks as needed
|
// Add callbacks as needed
|
||||||
WaitForAttachHook func(spec *Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error)
|
WaitForAttachHook func(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error)
|
||||||
UnmountDeviceHook func(globalMountPath string) error
|
UnmountDeviceHook func(globalMountPath string) error
|
||||||
|
|
||||||
SetUpCallCount int
|
SetUpCallCount int
|
||||||
@ -667,7 +666,7 @@ type FakeVolume struct {
|
|||||||
PodDeviceMapPathCallCount int
|
PodDeviceMapPathCallCount int
|
||||||
}
|
}
|
||||||
|
|
||||||
func getUniqueVolumeName(spec *Spec) (string, error) {
|
func getUniqueVolumeName(spec *volume.Spec) (string, error) {
|
||||||
var volumeName string
|
var volumeName string
|
||||||
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
|
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
|
||||||
volumeName = spec.Volume.GCEPersistentDisk.PDName
|
volumeName = spec.Volume.GCEPersistentDisk.PDName
|
||||||
@ -681,15 +680,15 @@ func getUniqueVolumeName(spec *Spec) (string, error) {
|
|||||||
return volumeName, nil
|
return volumeName, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (_ *FakeVolume) GetAttributes() Attributes {
|
func (_ *FakeVolume) GetAttributes() volume.Attributes {
|
||||||
return Attributes{
|
return volume.Attributes{
|
||||||
ReadOnly: false,
|
ReadOnly: false,
|
||||||
Managed: true,
|
Managed: true,
|
||||||
SELinuxRelabel: true,
|
SELinuxRelabel: true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) SetUp(mounterArgs MounterArgs) error {
|
func (fv *FakeVolume) SetUp(mounterArgs volume.MounterArgs) error {
|
||||||
fv.Lock()
|
fv.Lock()
|
||||||
defer fv.Unlock()
|
defer fv.Unlock()
|
||||||
err := fv.setupInternal(mounterArgs)
|
err := fv.setupInternal(mounterArgs)
|
||||||
@ -697,7 +696,7 @@ func (fv *FakeVolume) SetUp(mounterArgs MounterArgs) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) setupInternal(mounterArgs MounterArgs) error {
|
func (fv *FakeVolume) setupInternal(mounterArgs volume.MounterArgs) error {
|
||||||
if fv.VolName == TimeoutOnSetupVolumeName {
|
if fv.VolName == TimeoutOnSetupVolumeName {
|
||||||
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
|
fv.VolumeMountState[fv.VolName] = volumeMountUncertain
|
||||||
return volumetypes.NewUncertainProgressError("time out on setup")
|
return volumetypes.NewUncertainProgressError("time out on setup")
|
||||||
@ -745,7 +744,7 @@ func (fv *FakeVolume) GetSetUpCallCount() int {
|
|||||||
return fv.SetUpCallCount
|
return fv.SetUpCallCount
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) SetUpAt(dir string, mounterArgs MounterArgs) error {
|
func (fv *FakeVolume) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
|
||||||
return os.MkdirAll(dir, 0750)
|
return os.MkdirAll(dir, 0750)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -831,7 +830,7 @@ func (fv *FakeVolume) GetSetUpDeviceCallCount() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Block volume support
|
// Block volume support
|
||||||
func (fv *FakeVolume) GetGlobalMapPath(spec *Spec) (string, error) {
|
func (fv *FakeVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
|
||||||
fv.RLock()
|
fv.RLock()
|
||||||
defer fv.RUnlock()
|
defer fv.RUnlock()
|
||||||
fv.GlobalMapPathCallCount++
|
fv.GlobalMapPathCallCount++
|
||||||
@ -954,7 +953,7 @@ func (fv *FakeVolume) GetMapPodDeviceCallCount() int {
|
|||||||
return fv.MapPodDeviceCallCount
|
return fv.MapPodDeviceCallCount
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) Attach(spec *Spec, nodeName types.NodeName) (string, error) {
|
func (fv *FakeVolume) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
|
||||||
fv.Lock()
|
fv.Lock()
|
||||||
defer fv.Unlock()
|
defer fv.Unlock()
|
||||||
fv.AttachCallCount++
|
fv.AttachCallCount++
|
||||||
@ -993,7 +992,7 @@ func (fv *FakeVolume) GetAttachCallCount() int {
|
|||||||
return fv.AttachCallCount
|
return fv.AttachCallCount
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) WaitForAttach(spec *Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error) {
|
func (fv *FakeVolume) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error) {
|
||||||
fv.Lock()
|
fv.Lock()
|
||||||
defer fv.Unlock()
|
defer fv.Unlock()
|
||||||
fv.WaitForAttachCallCount++
|
fv.WaitForAttachCallCount++
|
||||||
@ -1009,14 +1008,14 @@ func (fv *FakeVolume) GetWaitForAttachCallCount() int {
|
|||||||
return fv.WaitForAttachCallCount
|
return fv.WaitForAttachCallCount
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) GetDeviceMountPath(spec *Spec) (string, error) {
|
func (fv *FakeVolume) GetDeviceMountPath(spec *volume.Spec) (string, error) {
|
||||||
fv.Lock()
|
fv.Lock()
|
||||||
defer fv.Unlock()
|
defer fv.Unlock()
|
||||||
fv.GetDeviceMountPathCallCount++
|
fv.GetDeviceMountPathCallCount++
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) mountDeviceInternal(spec *Spec, devicePath string, deviceMountPath string) error {
|
func (fv *FakeVolume) mountDeviceInternal(spec *volume.Spec, devicePath string, deviceMountPath string) error {
|
||||||
fv.Lock()
|
fv.Lock()
|
||||||
defer fv.Unlock()
|
defer fv.Unlock()
|
||||||
if spec.Name() == TimeoutOnMountDeviceVolumeName {
|
if spec.Name() == TimeoutOnMountDeviceVolumeName {
|
||||||
@ -1058,7 +1057,7 @@ func (fv *FakeVolume) mountDeviceInternal(spec *Spec, devicePath string, deviceM
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) MountDevice(spec *Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
|
func (fv *FakeVolume) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
|
||||||
return fv.mountDeviceInternal(spec, devicePath, deviceMountPath)
|
return fv.mountDeviceInternal(spec, devicePath, deviceMountPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1097,7 +1096,7 @@ func (fv *FakeVolume) Detach(volumeName string, nodeName types.NodeName) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fv *FakeVolume) VolumesAreAttached(spec []*Spec, nodeName types.NodeName) (map[*Spec]bool, error) {
|
func (fv *FakeVolume) VolumesAreAttached(spec []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
|
||||||
fv.Lock()
|
fv.Lock()
|
||||||
defer fv.Unlock()
|
defer fv.Unlock()
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@ -1121,7 +1120,7 @@ func (fv *FakeVolume) UnmountDevice(globalMountPath string) error {
|
|||||||
|
|
||||||
type FakeDeleter struct {
|
type FakeDeleter struct {
|
||||||
path string
|
path string
|
||||||
MetricsNil
|
volume.MetricsNil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fd *FakeDeleter) Delete() error {
|
func (fd *FakeDeleter) Delete() error {
|
||||||
@ -1134,8 +1133,8 @@ func (fd *FakeDeleter) GetPath() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type FakeProvisioner struct {
|
type FakeProvisioner struct {
|
||||||
Options VolumeOptions
|
Options volume.VolumeOptions
|
||||||
Host VolumeHost
|
Host volume.VolumeHost
|
||||||
ProvisionDelaySeconds int
|
ProvisionDelaySeconds int
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1626,8 +1625,8 @@ func VerifyGetMapPodDeviceCallCount(
|
|||||||
|
|
||||||
// GetTestVolumePluginMgr creates, initializes, and returns a test volume plugin
|
// GetTestVolumePluginMgr creates, initializes, and returns a test volume plugin
|
||||||
// manager and fake volume plugin using a fake volume host.
|
// manager and fake volume plugin using a fake volume host.
|
||||||
func GetTestVolumePluginMgr(t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin) {
|
func GetTestVolumePluginMgr(t *testing.T) (*volume.VolumePluginMgr, *FakeVolumePlugin) {
|
||||||
plugins := ProbeVolumePlugins(VolumeConfig{})
|
plugins := ProbeVolumePlugins(volume.VolumeConfig{})
|
||||||
v := NewFakeVolumeHost(
|
v := NewFakeVolumeHost(
|
||||||
t,
|
t,
|
||||||
"", /* rootDir */
|
"", /* rootDir */
|
||||||
@ -1637,8 +1636,8 @@ func GetTestVolumePluginMgr(t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin)
|
|||||||
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
|
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetTestKubeletVolumePluginMgr(t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin) {
|
func GetTestKubeletVolumePluginMgr(t *testing.T) (*volume.VolumePluginMgr, *FakeVolumePlugin) {
|
||||||
plugins := ProbeVolumePlugins(VolumeConfig{})
|
plugins := ProbeVolumePlugins(volume.VolumeConfig{})
|
||||||
v := NewFakeKubeletVolumeHost(
|
v := NewFakeKubeletVolumeHost(
|
||||||
t,
|
t,
|
||||||
"", /* rootDir */
|
"", /* rootDir */
|
||||||
@ -1648,8 +1647,8 @@ func GetTestKubeletVolumePluginMgr(t *testing.T) (*VolumePluginMgr, *FakeVolumeP
|
|||||||
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
|
return v.GetPluginMgr(), plugins[0].(*FakeVolumePlugin)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetTestKubeletVolumePluginMgrWithNode(t *testing.T, node *v1.Node) (*VolumePluginMgr, *FakeVolumePlugin) {
|
func GetTestKubeletVolumePluginMgrWithNode(t *testing.T, node *v1.Node) (*volume.VolumePluginMgr, *FakeVolumePlugin) {
|
||||||
plugins := ProbeVolumePlugins(VolumeConfig{})
|
plugins := ProbeVolumePlugins(volume.VolumeConfig{})
|
||||||
v := NewFakeKubeletVolumeHost(
|
v := NewFakeKubeletVolumeHost(
|
||||||
t,
|
t,
|
||||||
"", /* rootDir */
|
"", /* rootDir */
|
||||||
@ -1680,7 +1679,7 @@ func CreateTestPVC(capacity string, accessModes []v1.PersistentVolumeAccessMode)
|
|||||||
return &claim
|
return &claim
|
||||||
}
|
}
|
||||||
|
|
||||||
func MetricsEqualIgnoreTimestamp(a *Metrics, b *Metrics) bool {
|
func MetricsEqualIgnoreTimestamp(a *volume.Metrics, b *volume.Metrics) bool {
|
||||||
available := a.Available == b.Available
|
available := a.Available == b.Available
|
||||||
capacity := a.Capacity == b.Capacity
|
capacity := a.Capacity == b.Capacity
|
||||||
used := a.Used == b.Used
|
used := a.Used == b.Used
|
||||||
|
@@ -24,7 +24,6 @@ import (

 "github.com/stretchr/testify/assert"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apiserver/pkg/admission"
@@ -193,7 +192,7 @@ func TestPodAdmission(t *testing.T) {
 whitelist: []api.Toleration{},
 podTolerations: []api.Toleration{},
 mergedTolerations: []api.Toleration{
-{Key: v1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
+{Key: corev1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
 {Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
 },
 admit: true,
@@ -219,7 +218,7 @@ func TestPodAdmission(t *testing.T) {
 whitelist: []api.Toleration{},
 podTolerations: []api.Toleration{},
 mergedTolerations: []api.Toleration{
-{Key: v1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
+{Key: corev1.TaintNodeMemoryPressure, Operator: api.TolerationOpExists, Effect: api.TaintEffectNoSchedule, TolerationSeconds: nil},
 {Key: "testKey", Operator: "Equal", Value: "testValue", Effect: "NoSchedule", TolerationSeconds: nil},
 },
 admit: true,
@@ -26,7 +26,6 @@ import (
 "time"

 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -422,7 +421,7 @@ func (s *Plugin) mountServiceAccountToken(serviceAccount *corev1.ServiceAccount,
 func TokenVolumeSource() *api.ProjectedVolumeSource {
 return &api.ProjectedVolumeSource{
 // explicitly set default value, see #104464
-DefaultMode: pointer.Int32(v1.ProjectedVolumeSourceDefaultMode),
+DefaultMode: pointer.Int32(corev1.ProjectedVolumeSourceDefaultMode),
 Sources: []api.VolumeProjection{
 {
 ServiceAccountToken: &api.ServiceAccountTokenProjection{
@@ -25,7 +25,6 @@ import (
 "github.com/google/go-cmp/cmp"
 "github.com/stretchr/testify/assert"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/diff"
@@ -177,9 +176,9 @@ func TestAssignsDefaultServiceAccountAndBoundTokenWithNoSecretTokens(t *testing.
 },
 })

-v1PodIn := &v1.Pod{
-Spec: v1.PodSpec{
-Containers: []v1.Container{{}},
+v1PodIn := &corev1.Pod{
+Spec: corev1.PodSpec{
+Containers: []corev1.Container{{}},
 },
 }
 v1defaults.SetObjectDefaults_Pod(v1PodIn)
@@ -232,7 +231,7 @@ func TestAssignsDefaultServiceAccountAndBoundTokenWithNoSecretTokens(t *testing.
 }

 // ensure result converted to v1 matches defaulted object
-v1PodOut := &v1.Pod{}
+v1PodOut := &corev1.Pod{}
 if err := v1defaults.Convert_core_Pod_To_v1_Pod(pod, v1PodOut, nil); err != nil {
 t.Fatal(err)
 }
@@ -22,7 +22,6 @@ import (
 "fmt"
 "time"

-apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 apivalidation "k8s.io/apimachinery/pkg/api/validation"
@@ -73,7 +72,7 @@ type webhookConverter struct {
 conversionReviewVersions []string
 }

-func webhookClientConfigForCRD(crd *apiextensionsv1.CustomResourceDefinition) *webhook.ClientConfig {
+func webhookClientConfigForCRD(crd *v1.CustomResourceDefinition) *webhook.ClientConfig {
 apiConfig := crd.Spec.Conversion.Webhook.ClientConfig
 ret := webhook.ClientConfig{
 Name: fmt.Sprintf("conversion_webhook_for_%s", crd.Name),
@@ -97,7 +96,7 @@ func webhookClientConfigForCRD(crd *apiextensionsv1.CustomResourceDefinition) *w

 var _ crConverterInterface = &webhookConverter{}

-func (f *webhookConverterFactory) NewWebhookConverter(crd *apiextensionsv1.CustomResourceDefinition) (*webhookConverter, error) {
+func (f *webhookConverterFactory) NewWebhookConverter(crd *v1.CustomResourceDefinition) (*webhookConverter, error) {
 restClient, err := f.clientManager.HookClient(*webhookClientConfigForCRD(crd))
 if err != nil {
 return nil, err
@@ -55,7 +55,6 @@ import (
 "k8s.io/apiserver/pkg/authorization/authorizer"
 "k8s.io/apiserver/pkg/endpoints/discovery"
 apirequest "k8s.io/apiserver/pkg/endpoints/request"
-genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 "k8s.io/apiserver/pkg/registry/generic"
 genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 "k8s.io/apiserver/pkg/registry/rest"
@@ -478,7 +477,7 @@ func testHandlerConversion(t *testing.T, enableWatchCache bool) {
 crd := multiVersionFixture.DeepCopy()
 // Create a context with metav1.NamespaceNone as the namespace since multiVersionFixture
 // is a cluster scoped CRD.
-ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceNone)
+ctx := apirequest.WithNamespace(apirequest.NewContext(), metav1.NamespaceNone)
 if _, err := cl.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{}); err != nil {
 t.Fatal(err)
 }
@@ -31,7 +31,6 @@ import (
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 genericregistry "k8s.io/apiserver/pkg/registry/generic"
 genericapiserver "k8s.io/apiserver/pkg/server"
-"k8s.io/apiserver/pkg/server/options"
 genericoptions "k8s.io/apiserver/pkg/server/options"
 "k8s.io/apiserver/pkg/util/proxy"
 "k8s.io/apiserver/pkg/util/webhook"
@@ -43,7 +42,7 @@ const defaultEtcdPathPrefix = "/registry/apiextensions.kubernetes.io"

 // CustomResourceDefinitionsServerOptions describes the runtime options of an apiextensions-apiserver.
 type CustomResourceDefinitionsServerOptions struct {
-ServerRunOptions *options.ServerRunOptions
+ServerRunOptions *genericoptions.ServerRunOptions
 RecommendedOptions *genericoptions.RecommendedOptions
 APIEnablement *genericoptions.APIEnablementOptions

@@ -54,7 +53,7 @@ type CustomResourceDefinitionsServerOptions struct {
 // NewCustomResourceDefinitionsServerOptions creates default options of an apiextensions-apiserver.
 func NewCustomResourceDefinitionsServerOptions(out, errOut io.Writer) *CustomResourceDefinitionsServerOptions {
 o := &CustomResourceDefinitionsServerOptions{
-ServerRunOptions: options.NewServerRunOptions(),
+ServerRunOptions: genericoptions.NewServerRunOptions(),
 RecommendedOptions: genericoptions.NewRecommendedOptions(
 defaultEtcdPathPrefix,
 apiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion),
@@ -26,7 +26,6 @@ import (
 "go.etcd.io/etcd/client/pkg/v3/transport"
 clientv3 "go.etcd.io/etcd/client/v3"
 "google.golang.org/grpc"
-"k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
 serveroptions "k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
 "k8s.io/apimachinery/pkg/runtime/schema"

@@ -37,7 +36,7 @@ import (
 )

 // StartDefaultServer starts a test server.
-func StartDefaultServer(t servertesting.Logger, flags ...string) (func(), *rest.Config, *options.CustomResourceDefinitionsServerOptions, error) {
+func StartDefaultServer(t servertesting.Logger, flags ...string) (func(), *rest.Config, *serveroptions.CustomResourceDefinitionsServerOptions, error) {
 // create kubeconfig which will not actually be used. But authz/authn needs it to startup.
 fakeKubeConfig, err := ioutil.TempFile("", "kubeconfig")
 if err != nil {
@@ -22,14 +22,13 @@ import (
 "time"

 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 )

 func TestTransformManagedFieldsToSubresource(t *testing.T) {
 testTime, _ := time.ParseInLocation("2006-Jan-02", "2013-Feb-03", time.Local)
-managedFieldTime := v1.NewTime(testTime)
+managedFieldTime := metav1.NewTime(testTime)

 tests := []struct {
 desc string
@@ -36,7 +36,6 @@ import (
 "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
 "k8s.io/apiserver/pkg/endpoints/metrics"
 "k8s.io/apiserver/pkg/endpoints/request"
-endpointsrequest "k8s.io/apiserver/pkg/endpoints/request"
 "k8s.io/apiserver/pkg/registry/rest"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/apiserver/pkg/util/flushwriter"
@@ -272,7 +271,7 @@ func WriteObjectNegotiated(s runtime.NegotiatedSerializer, restrictions negotiat
 audit.LogResponseObject(req.Context(), object, gv, s)

 encoder := s.EncoderForVersion(serializer.Serializer, gv)
-endpointsrequest.TrackSerializeResponseObjectLatency(req.Context(), func() {
+request.TrackSerializeResponseObjectLatency(req.Context(), func() {
 SerializeObject(serializer.MediaType, encoder, w, req, statusCode, object)
 })
 }
@@ -52,7 +52,6 @@ import (
 genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
 apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi"
 apirequest "k8s.io/apiserver/pkg/endpoints/request"
-"k8s.io/apiserver/pkg/features"
 genericfeatures "k8s.io/apiserver/pkg/features"
 genericregistry "k8s.io/apiserver/pkg/registry/generic"
 "k8s.io/apiserver/pkg/server/dynamiccertificates"
@@ -62,7 +61,6 @@ import (
 "k8s.io/apiserver/pkg/server/routes"
 serverstore "k8s.io/apiserver/pkg/server/storage"
 "k8s.io/apiserver/pkg/storageversion"
-"k8s.io/apiserver/pkg/util/feature"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
 utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
 flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
@@ -323,7 +321,7 @@ type AuthorizationInfo struct {
 func NewConfig(codecs serializer.CodecFactory) *Config {
 defaultHealthChecks := []healthz.HealthChecker{healthz.PingHealthz, healthz.LogHealthz}
 var id string
-if feature.DefaultFeatureGate.Enabled(features.APIServerIdentity) {
+if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerIdentity) {
 id = "kube-apiserver-" + uuid.New().String()
 }
 lifecycleSignals := newLifecycleSignals()
@@ -890,7 +888,7 @@ func installAPI(s *GenericAPIServer, c *Config) {
 if c.EnableDiscovery {
 s.Handler.GoRestfulContainer.Add(s.DiscoveryGroupManager.WebService())
 }
-if c.FlowControl != nil && feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) {
+if c.FlowControl != nil && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIPriorityAndFairness) {
 c.FlowControl.Install(s.Handler.NonGoRestfulMux)
 }
 }
@@ -22,7 +22,6 @@ import (
 "testing"

 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 storage "k8s.io/api/storage/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -98,7 +97,7 @@ func TestGetDiskName(t *testing.T) {
 }

 func TestTranslateAzureDiskInTreeInlineVolumeToCSI(t *testing.T) {
-sharedBlobDiskKind := v1.AzureDedicatedBlobDisk
+sharedBlobDiskKind := corev1.AzureDedicatedBlobDisk
 translator := NewAzureDiskCSITranslator()

 cases := []struct {
@@ -177,7 +176,7 @@ func TestTranslateAzureDiskInTreeInlineVolumeToCSI(t *testing.T) {
 func TestTranslateAzureDiskInTreePVToCSI(t *testing.T) {
 translator := NewAzureDiskCSITranslator()

-sharedBlobDiskKind := v1.AzureDedicatedBlobDisk
+sharedBlobDiskKind := corev1.AzureDedicatedBlobDisk
 cachingMode := corev1.AzureDataDiskCachingMode("cachingmode")
 fsType := "fstype"
 readOnly := true
@@ -273,7 +272,7 @@ func TestTranslateTranslateCSIPVToInTree(t *testing.T) {
 fsType := "fstype"
 readOnly := true
 diskURI := "/subscriptions/12/resourceGroups/23/providers/Microsoft.Compute/disks/name"
-managed := v1.AzureManagedDisk
+managed := corev1.AzureManagedDisk

 translator := NewAzureDiskCSITranslator()
 cases := []struct {
@@ -482,12 +481,12 @@ func TestTranslateInTreeStorageClassToCSI(t *testing.T) {
 },
 {
 name: "some translated topology",
-options: NewStorageClass(map[string]string{}, generateToplogySelectors(v1.LabelTopologyZone, []string{"foo"})),
+options: NewStorageClass(map[string]string{}, generateToplogySelectors(corev1.LabelTopologyZone, []string{"foo"})),
 expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(AzureDiskTopologyKey, []string{"foo"})),
 },
 {
 name: "some translated topology with beta labels",
-options: NewStorageClass(map[string]string{}, generateToplogySelectors(v1.LabelFailureDomainBetaZone, []string{"foo"})),
+options: NewStorageClass(map[string]string{}, generateToplogySelectors(corev1.LabelFailureDomainBetaZone, []string{"foo"})),
 expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(AzureDiskTopologyKey, []string{"foo"})),
 },
 {
@@ -497,17 +496,17 @@ func TestTranslateInTreeStorageClassToCSI(t *testing.T) {
 },
 {
 name: "topology in regions without zones",
-options: NewStorageClass(map[string]string{}, generateToplogySelectors(v1.LabelTopologyZone, []string{"0"})),
+options: NewStorageClass(map[string]string{}, generateToplogySelectors(corev1.LabelTopologyZone, []string{"0"})),
 expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(AzureDiskTopologyKey, []string{""})),
 },
 {
 name: "longer topology in regions without zones",
-options: NewStorageClass(map[string]string{}, generateToplogySelectors(v1.LabelTopologyZone, []string{"1234"})),
+options: NewStorageClass(map[string]string{}, generateToplogySelectors(corev1.LabelTopologyZone, []string{"1234"})),
 expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(AzureDiskTopologyKey, []string{""})),
 },
 {
 name: "topology in regions with zones",
-options: NewStorageClass(map[string]string{}, generateToplogySelectors(v1.LabelTopologyZone, []string{"centralus-1"})),
+options: NewStorageClass(map[string]string{}, generateToplogySelectors(corev1.LabelTopologyZone, []string{"centralus-1"})),
 expOptions: NewStorageClass(map[string]string{}, generateToplogySelectors(AzureDiskTopologyKey, []string{"centralus-1"})),
 },
 }
@@ -27,7 +27,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
-"k8s.io/apiserver/pkg/features"
 genericfeatures "k8s.io/apiserver/pkg/features"
 genericapiserver "k8s.io/apiserver/pkg/server"
 "k8s.io/apiserver/pkg/server/egressselector"
@@ -371,7 +370,7 @@ func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) {
 })
 }

-if s.openAPIV3Config != nil && utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) {
+if s.openAPIV3Config != nil && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.OpenAPIV3) {
 s.GenericAPIServer.AddPostStartHookOrDie("apiservice-openapiv3-controller", func(context genericapiserver.PostStartHookContext) error {
 go s.openAPIV3AggregationController.Run(context.StopCh)
 return nil
@@ -395,7 +394,7 @@ func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) {
 s.openAPIAggregationController = openapicontroller.NewAggregationController(&specDownloader, openAPIAggregator)
 }

-if s.openAPIV3Config != nil && utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) {
+if s.openAPIV3Config != nil && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.OpenAPIV3) {
 specDownloaderV3 := openapiv3aggregator.NewDownloader()
 openAPIV3Aggregator, err := openapiv3aggregator.BuildAndRegisterAggregator(
 specDownloaderV3,
@@ -25,7 +25,6 @@ import (
 "github.com/google/go-cmp/cmp"

 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 )
@@ -50,7 +49,7 @@ func TestCompatibility(t *testing.T) {
 emptyObj: &ExtenderPreemptionArgs{},
 obj: &ExtenderPreemptionArgs{
 Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podname"}},
-NodeNameToVictims: map[string]*Victims{"foo": {Pods: []*v1.Pod{&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podname"}}}, NumPDBViolations: 1}},
+NodeNameToVictims: map[string]*Victims{"foo": {Pods: []*corev1.Pod{{ObjectMeta: metav1.ObjectMeta{Name: "podname"}}}, NumPDBViolations: 1}},
 NodeNameToMetaVictims: map[string]*MetaVictims{"foo": {Pods: []*MetaPod{{UID: "myuid"}}, NumPDBViolations: 1}},
 },
 expectJSON: `{"Pod":{"metadata":{"name":"podname","creationTimestamp":null},"spec":{"containers":null},"status":{}},"NodeNameToVictims":{"foo":{"Pods":[{"metadata":{"name":"podname","creationTimestamp":null},"spec":{"containers":null},"status":{}}],"NumPDBViolations":1}},"NodeNameToMetaVictims":{"foo":{"Pods":[{"UID":"myuid"}],"NumPDBViolations":1}}}`,
@@ -29,7 +29,6 @@ import (
 "k8s.io/apimachinery/pkg/util/intstr"
 "k8s.io/cli-runtime/pkg/genericclioptions"
 "k8s.io/cli-runtime/pkg/resource"
-resourcecli "k8s.io/cli-runtime/pkg/resource"
 policyv1client "k8s.io/client-go/kubernetes/typed/policy/v1"
 cmdutil "k8s.io/kubectl/pkg/cmd/util"
 "k8s.io/kubectl/pkg/scheme"
@@ -72,7 +71,7 @@ type PodDisruptionBudgetOpts struct {

 Client *policyv1client.PolicyV1Client
 DryRunStrategy cmdutil.DryRunStrategy
-DryRunVerifier *resourcecli.QueryParamVerifier
+DryRunVerifier *resource.QueryParamVerifier
 ValidationDirective string

 genericclioptions.IOStreams
@@ -29,7 +29,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/cli-runtime/pkg/genericclioptions"
 "k8s.io/cli-runtime/pkg/resource"
-resourcecli "k8s.io/cli-runtime/pkg/resource"
 coreclient "k8s.io/client-go/kubernetes/typed/core/v1"
 cmdutil "k8s.io/kubectl/pkg/cmd/util"
 "k8s.io/kubectl/pkg/scheme"
@@ -68,7 +67,7 @@ type QuotaOpts struct {

 Client *coreclient.CoreV1Client
 DryRunStrategy cmdutil.DryRunStrategy
-DryRunVerifier *resourcecli.QueryParamVerifier
+DryRunVerifier *resource.QueryParamVerifier
 ValidationDirective string

 genericclioptions.IOStreams
@@ -138,7 +137,7 @@ func (o *QuotaOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []strin
 if err != nil {
 return err
 }
-o.DryRunVerifier = resourcecli.NewQueryParamVerifier(dynamicClient, f.OpenAPIGetter(), resource.QueryParamDryRun)
+o.DryRunVerifier = resource.NewQueryParamVerifier(dynamicClient, f.OpenAPIGetter(), resource.QueryParamDryRun)

 o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
 if err != nil {
@@ -18,7 +18,6 @@ package test

 import (
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 "k8s.io/component-base/featuregate"
 "k8s.io/pod-security-admission/api"
 )
@@ -26,29 +25,29 @@ import (
 func init() {
 fixtureData_1_0 := fixtureGenerator{
 expectErrorSubstring: "procMount",
-generatePass: func(p *v1.Pod) []*v1.Pod {
+generatePass: func(p *corev1.Pod) []*corev1.Pod {
 p = ensureSecurityContext(p)
 return []*corev1.Pod{
 // set proc mount of container and init container to a valid value
-tweak(p, func(copy *v1.Pod) {
-validProcMountType := v1.DefaultProcMount
+tweak(p, func(copy *corev1.Pod) {
+validProcMountType := corev1.DefaultProcMount
 copy.Spec.Containers[0].SecurityContext.ProcMount = &validProcMountType
 copy.Spec.InitContainers[0].SecurityContext.ProcMount = &validProcMountType
 }),
 }
 },
 failRequiresFeatures: []featuregate.Feature{"ProcMountType"},
-generateFail: func(p *v1.Pod) []*v1.Pod {
+generateFail: func(p *corev1.Pod) []*corev1.Pod {
 p = ensureSecurityContext(p)
 return []*corev1.Pod{
 // set proc mount of container to a forbidden value
-tweak(p, func(copy *v1.Pod) {
-unmaskedProcMountType := v1.UnmaskedProcMount
+tweak(p, func(copy *corev1.Pod) {
+unmaskedProcMountType := corev1.UnmaskedProcMount
 copy.Spec.Containers[0].SecurityContext.ProcMount = &unmaskedProcMountType
 }),
 // set proc mount of init container to a forbidden value
-tweak(p, func(copy *v1.Pod) {
-unmaskedProcMountType := v1.UnmaskedProcMount
+tweak(p, func(copy *corev1.Pod) {
+unmaskedProcMountType := corev1.UnmaskedProcMount
 copy.Spec.InitContainers[0].SecurityContext.ProcMount = &unmaskedProcMountType
 }),
 }
@@ -32,7 +32,6 @@ import (
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/types"
@@ -594,7 +593,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 }
 jsonFlunder, err := json.Marshal(testFlunder)
 framework.ExpectNoError(err, "marshalling test-flunder for create using dynamic client")
-unstruct := &unstructuredv1.Unstructured{}
+unstruct := &unstructured.Unstructured{}
 err = unstruct.UnmarshalJSON(jsonFlunder)
 framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
 _, err = dynamicClient.Create(context.TODO(), unstruct, metav1.CreateOptions{})
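Every hunk in this commit follows the same shape: a package that was imported twice, once under its default name and once under a redundant alias, keeps a single spelling, and the call sites that used the dropped name are rewritten. A minimal sketch of the pattern, using only the unstructured import from the hunk above (standalone example, not taken from the repository):

// Minimal sketch, not part of the commit: the same import path spelled twice
// compiles, but only one name is needed.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" // kept: package name "unstructured"
	// unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" // removed duplicate alias
)

func main() {
	// After the cleanup every reference uses the single remaining name.
	u := &unstructured.Unstructured{Object: map[string]interface{}{}}
	u.SetName("test-flunder")
	fmt.Println(u.GetName())
}

Because the duplicate import compiles cleanly, it tends to linger unnoticed until a cleanup pass like this one.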
@@ -28,7 +28,6 @@ import (

 "github.com/onsi/ginkgo/v2"
 openapiutil "k8s.io/kube-openapi/pkg/util"
-kubeopenapispec "k8s.io/kube-openapi/pkg/validation/spec"
 "k8s.io/utils/pointer"
 "sigs.k8s.io/yaml"

@@ -696,7 +695,7 @@ func convertJSONSchemaProps(in []byte, out *spec.Schema) error {
 if err := apiextensionsv1.Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(&external, &internal, nil); err != nil {
 return err
 }
-kubeOut := kubeopenapispec.Schema{}
+kubeOut := spec.Schema{}
 if err := validation.ConvertJSONSchemaPropsWithPostProcess(&internal, &kubeOut, validation.StripUnsupportedFormatsPostProcess); err != nil {
 return err
 }
@@ -59,7 +59,6 @@ import (
 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
-"k8s.io/client-go/kubernetes"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -309,7 +308,7 @@ type mockCSIDriver struct {
 embeddedCSIDriver *mockdriver.CSIDriver

 // Additional values set during PrepareTest
-clientSet kubernetes.Interface
+clientSet clientset.Interface
 driverNamespace *v1.Namespace
 }

@@ -20,14 +20,13 @@ import (
 "github.com/onsi/ginkgo/v2"

 v1 "k8s.io/api/core/v1"
-errors "k8s.io/apimachinery/pkg/util/errors"
+"k8s.io/apimachinery/pkg/util/errors"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
-"k8s.io/kubernetes/test/e2e/storage/utils"
 storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 admissionapi "k8s.io/pod-security-admission/api"
 )
@@ -133,18 +132,18 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
 disruptiveTestTable := []disruptiveTest{
 {
 testItStmt: "Should test that pv written before kubelet restart is readable after restart.",
-runTestFile: utils.TestKubeletRestartsAndRestoresMount,
+runTestFile: storageutils.TestKubeletRestartsAndRestoresMount,
-runTestBlock: utils.TestKubeletRestartsAndRestoresMap,
+runTestBlock: storageutils.TestKubeletRestartsAndRestoresMap,
 },
 {
 testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.",
 // File test is covered by subpath testsuite
-runTestBlock: utils.TestVolumeUnmapsFromDeletedPod,
+runTestBlock: storageutils.TestVolumeUnmapsFromDeletedPod,
 },
 {
 testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.",
 // File test is covered by subpath testsuite
-runTestBlock: utils.TestVolumeUnmapsFromForceDeletedPod,
+runTestBlock: storageutils.TestVolumeUnmapsFromForceDeletedPod,
 },
 }

@@ -32,7 +32,6 @@ import (
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
-"k8s.io/kubernetes/test/e2e/storage/utils"
 storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -506,14 +505,14 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n

 if readSeedBase > 0 {
 ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, readSeedBase+int64(i))
+storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, readSeedBase+int64(i))
 }

 ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
-utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
+storageutils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))

 ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
+storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
 }

 pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -599,7 +598,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 framework.Failf("Number of pods shouldn't be less than 1, but got %d", len(pods))
 }
 // byteLen should be the size of a sector to enable direct I/O
-byteLen = utils.GetSectorSize(f, pods[0], path)
+byteLen = storageutils.GetSectorSize(f, pods[0], path)
 directIO = true
 }

@@ -617,17 +616,17 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 if i != 0 {
 ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 // For 1st pod, no one has written data yet, so pass the read check
-utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
+storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 }

 // Update the seed and check if write/read works properly
 seed = time.Now().UTC().UnixNano()

 ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
-utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
+storageutils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)

 ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
-utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
+storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 }

 if len(pods) < 2 {
@@ -656,16 +655,16 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 } else {
 ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
 }
-utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
+storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)

 // Update the seed and check if write/read works properly
 seed = time.Now().UTC().UnixNano()

 ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
-utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
+storageutils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)

 ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
-utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
+storageutils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 }
 }

@@ -39,7 +39,6 @@ import (
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
-"k8s.io/kubernetes/test/e2e/storage/utils"
 storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 admissionapi "k8s.io/pod-security-admission/api"
 )
@@ -442,11 +441,11 @@ func deleteVolumeSnapshot(f *framework.Framework, dc dynamic.Interface, sr *stor
 switch pattern.SnapshotDeletionPolicy {
 case storageframework.DeleteSnapshot:
 ginkgo.By("checking the SnapshotContent has been deleted")
-err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
+err = storageutils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
 framework.ExpectNoError(err)
 case storageframework.RetainSnapshot:
 ginkgo.By("checking the SnapshotContent has not been deleted")
-err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
+err = storageutils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
 framework.ExpectError(err)
 }
 }
@@ -40,7 +40,6 @@ import (
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
-"k8s.io/kubernetes/test/e2e/storage/utils"
 storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 admissionapi "k8s.io/pod-security-admission/api"
@@ -101,7 +100,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
 config *storageframework.PerTestConfig
 driverCleanup func()

-hostExec utils.HostExec
+hostExec storageutils.HostExec
 resource *storageframework.VolumeResource
 roVolSource *v1.VolumeSource
 pod *v1.Pod
@@ -127,7 +126,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
 l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName)
 testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
 l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
-l.hostExec = utils.NewHostExec(f)
+l.hostExec = storageutils.NewHostExec(f)

 // Setup subPath test dependent resource
 volType := pattern.VolType
@@ -962,7 +961,7 @@ func TestPodContainerRestartWithConfigmapModified(f *framework.Framework, origin

 }

-func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec, pod *v1.Pod, forceDelete bool) {
+func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.HostExec, pod *v1.Pod, forceDelete bool) {
 // This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()

 // Disruptive test run serially, we can cache all voluem global mount
@@ -971,7 +970,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
 framework.ExpectNoError(err, "while listing schedulable nodes")
 globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
 for _, node := range nodeList.Items {
-globalMountPointsByNode[node.Name] = utils.FindVolumeGlobalMountPoints(hostExec, &node)
+globalMountPointsByNode[node.Name] = storageutils.FindVolumeGlobalMountPoints(hostExec, &node)
 }

 // Change to busybox
@@ -1004,11 +1003,11 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
 }
 framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")

-utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
+storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)

 if podNode != nil {
 mountPoints := globalMountPointsByNode[podNode.Name]
-mountPointsAfter := utils.FindVolumeGlobalMountPoints(hostExec, podNode)
+mountPointsAfter := storageutils.FindVolumeGlobalMountPoints(hostExec, podNode)
 s1 := mountPointsAfter.Difference(mountPoints)
 s2 := mountPoints.Difference(mountPointsAfter)
 gomega.Expect(s1).To(gomega.BeEmpty(), "global mount points leaked: %v", s1)
@@ -23,7 +23,6 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 v1 "k8s.io/api/core/v1"
 storagev1 "k8s.io/api/storage/v1"
-"k8s.io/kubernetes/test/e2e/framework"
 e2eframework "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
@@ -49,7 +48,7 @@ import (
 //
 // Driver deployments that are different will have to do the patching
 // without this function, or skip patching entirely.
-func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interface{}) error {
+func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object interface{}) error {
 rename := o.OldDriverName != "" && o.NewDriverName != "" &&
 o.OldDriverName != o.NewDriverName

@@ -36,12 +36,10 @@ import (
 admissionreviewv1 "k8s.io/api/admission/v1"
 "k8s.io/api/admission/v1beta1"
 admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
-admissionv1 "k8s.io/api/admissionregistration/v1"
+admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
-admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
 appsv1beta1 "k8s.io/api/apps/v1beta1"
 authenticationv1 "k8s.io/api/authentication/v1"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 policyv1 "k8s.io/api/policy/v1"
 apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@@ -54,7 +52,7 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/wait"
 genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
-dynamic "k8s.io/client-go/dynamic"
+"k8s.io/client-go/dynamic"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/rest"
 "k8s.io/client-go/util/retry"
@@ -513,7 +511,7 @@ func testWebhookAdmission(t *testing.T, watchCache bool) {
 // create CRDs
 etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(server.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...)

-if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}); err != nil {
+if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}, metav1.CreateOptions{}); err != nil {
 t.Fatal(err)
 }

@@ -577,8 +575,8 @@ func testWebhookAdmission(t *testing.T, watchCache bool) {
 // Note: this only works because there are no overlapping resource names in-process that are not co-located
 convertedResources := map[string]schema.GroupVersionResource{}
 // build the webhook rules enumerating the specific group/version/resources we want
-convertedV1beta1Rules := []admissionv1beta1.RuleWithOperations{}
+convertedV1beta1Rules := []admissionregistrationv1beta1.RuleWithOperations{}
-convertedV1Rules := []admissionv1.RuleWithOperations{}
+convertedV1Rules := []admissionregistrationv1.RuleWithOperations{}
 for _, gvr := range gvrsToTest {
 metaGVR := metav1.GroupVersionResource{Group: gvr.Group, Version: gvr.Version, Resource: gvr.Resource}

@@ -589,13 +587,13 @@ func testWebhookAdmission(t *testing.T, watchCache bool) {
 convertedGVR = gvr
 convertedResources[gvr.Resource] = gvr
 // add an admission rule indicating we can receive this version
-convertedV1beta1Rules = append(convertedV1beta1Rules, admissionv1beta1.RuleWithOperations{
+convertedV1beta1Rules = append(convertedV1beta1Rules, admissionregistrationv1beta1.RuleWithOperations{
-Operations: []admissionv1beta1.OperationType{admissionv1beta1.OperationAll},
+Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.OperationAll},
-Rule: admissionv1beta1.Rule{APIGroups: []string{gvr.Group}, APIVersions: []string{gvr.Version}, Resources: []string{gvr.Resource}},
+Rule: admissionregistrationv1beta1.Rule{APIGroups: []string{gvr.Group}, APIVersions: []string{gvr.Version}, Resources: []string{gvr.Resource}},
 })
-convertedV1Rules = append(convertedV1Rules, admissionv1.RuleWithOperations{
+convertedV1Rules = append(convertedV1Rules, admissionregistrationv1.RuleWithOperations{
-Operations: []admissionv1.OperationType{admissionv1.OperationAll},
+Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.OperationAll},
-Rule: admissionv1.Rule{APIGroups: []string{gvr.Group}, APIVersions: []string{gvr.Version}, Resources: []string{gvr.Resource}},
+Rule: admissionregistrationv1.Rule{APIGroups: []string{gvr.Group}, APIVersions: []string{gvr.Version}, Resources: []string{gvr.Resource}},
 })
 }

@@ -1533,28 +1531,28 @@ func shouldTestResourceVerb(gvr schema.GroupVersionResource, resource metav1.API
 // webhook registration helpers
 //

-func createV1beta1ValidationWebhook(etcdClient *clientv3.Client, etcdStoragePrefix string, client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionv1beta1.RuleWithOperations) error {
+func createV1beta1ValidationWebhook(etcdClient *clientv3.Client, etcdStoragePrefix string, client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionregistrationv1beta1.RuleWithOperations) error {
-fail := admissionv1beta1.Fail
+fail := admissionregistrationv1beta1.Fail
-equivalent := admissionv1beta1.Equivalent
+equivalent := admissionregistrationv1beta1.Equivalent
-webhookConfig := &admissionv1beta1.ValidatingWebhookConfiguration{
+webhookConfig := &admissionregistrationv1beta1.ValidatingWebhookConfiguration{
 ObjectMeta: metav1.ObjectMeta{Name: "admission.integration.test"},
-Webhooks: []admissionv1beta1.ValidatingWebhook{
+Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
 {
 Name: "admission.integration.test",
-ClientConfig: admissionv1beta1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
 URL: &endpoint,
 CABundle: localhostCert,
 },
-Rules: []admissionv1beta1.RuleWithOperations{{
+Rules: []admissionregistrationv1beta1.RuleWithOperations{{
-Operations: []admissionv1beta1.OperationType{admissionv1beta1.OperationAll},
+Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.OperationAll},
-Rule: admissionv1beta1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
+Rule: admissionregistrationv1beta1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
 }},
 FailurePolicy: &fail,
 AdmissionReviewVersions: []string{"v1beta1"},
 },
 {
 Name: "admission.integration.testconversion",
-ClientConfig: admissionv1beta1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
 URL: &convertedEndpoint,
 CABundle: localhostCert,
 },
@@ -1586,28 +1584,28 @@ func createV1beta1ValidationWebhook(etcdClient *clientv3.Client, etcdStoragePref
 return nil
 }

-func createV1beta1MutationWebhook(etcdClient *clientv3.Client, etcdStoragePrefix string, client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionv1beta1.RuleWithOperations) error {
+func createV1beta1MutationWebhook(etcdClient *clientv3.Client, etcdStoragePrefix string, client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionregistrationv1beta1.RuleWithOperations) error {
-fail := admissionv1beta1.Fail
+fail := admissionregistrationv1beta1.Fail
-equivalent := admissionv1beta1.Equivalent
+equivalent := admissionregistrationv1beta1.Equivalent
-webhookConfig := &admissionv1beta1.MutatingWebhookConfiguration{
+webhookConfig := &admissionregistrationv1beta1.MutatingWebhookConfiguration{
 ObjectMeta: metav1.ObjectMeta{Name: "mutation.integration.test"},
-Webhooks: []admissionv1beta1.MutatingWebhook{
+Webhooks: []admissionregistrationv1beta1.MutatingWebhook{
 {
 Name: "mutation.integration.test",
-ClientConfig: admissionv1beta1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
 URL: &endpoint,
 CABundle: localhostCert,
 },
-Rules: []admissionv1beta1.RuleWithOperations{{
+Rules: []admissionregistrationv1beta1.RuleWithOperations{{
-Operations: []admissionv1beta1.OperationType{admissionv1beta1.OperationAll},
+Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.OperationAll},
-Rule: admissionv1beta1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
+Rule: admissionregistrationv1beta1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
 }},
 FailurePolicy: &fail,
 AdmissionReviewVersions: []string{"v1beta1"},
 },
 {
 Name: "mutation.integration.testconversion",
-ClientConfig: admissionv1beta1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
 URL: &convertedEndpoint,
 CABundle: localhostCert,
 },
@@ -1639,31 +1637,31 @@ func createV1beta1MutationWebhook(etcdClient *clientv3.Client, etcdStoragePrefix
 return nil
 }

-func createV1ValidationWebhook(client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionv1.RuleWithOperations) error {
+func createV1ValidationWebhook(client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionregistrationv1.RuleWithOperations) error {
-fail := admissionv1.Fail
+fail := admissionregistrationv1.Fail
-equivalent := admissionv1.Equivalent
+equivalent := admissionregistrationv1.Equivalent
-none := admissionv1.SideEffectClassNone
+none := admissionregistrationv1.SideEffectClassNone
 // Attaching Admission webhook to API server
-_, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), &admissionv1.ValidatingWebhookConfiguration{
+_, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), &admissionregistrationv1.ValidatingWebhookConfiguration{
-ObjectMeta: metav1.ObjectMeta{Name: "admissionv1.integration.test"},
+ObjectMeta: metav1.ObjectMeta{Name: "admissionregistrationv1.integration.test"},
-Webhooks: []admissionv1.ValidatingWebhook{
+Webhooks: []admissionregistrationv1.ValidatingWebhook{
 {
-Name: "admissionv1.integration.test",
+Name: "admissionregistrationv1.integration.test",
-ClientConfig: admissionv1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1.WebhookClientConfig{
 URL: &endpoint,
 CABundle: localhostCert,
 },
-Rules: []admissionv1.RuleWithOperations{{
+Rules: []admissionregistrationv1.RuleWithOperations{{
-Operations: []admissionv1.OperationType{admissionv1.OperationAll},
+Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.OperationAll},
-Rule: admissionv1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
+Rule: admissionregistrationv1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
 }},
 FailurePolicy: &fail,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
 SideEffects: &none,
 },
 {
-Name: "admissionv1.integration.testconversion",
+Name: "admissionregistrationv1.integration.testconversion",
-ClientConfig: admissionv1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1.WebhookClientConfig{
 URL: &convertedEndpoint,
 CABundle: localhostCert,
 },
@@ -1678,23 +1676,23 @@ func createV1ValidationWebhook(client clientset.Interface, endpoint, convertedEn
 return err
 }

-func createV1MutationWebhook(client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionv1.RuleWithOperations) error {
+func createV1MutationWebhook(client clientset.Interface, endpoint, convertedEndpoint string, convertedRules []admissionregistrationv1.RuleWithOperations) error {
-fail := admissionv1.Fail
+fail := admissionregistrationv1.Fail
-equivalent := admissionv1.Equivalent
+equivalent := admissionregistrationv1.Equivalent
-none := admissionv1.SideEffectClassNone
+none := admissionregistrationv1.SideEffectClassNone
 // Attaching Mutation webhook to API server
-_, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionv1.MutatingWebhookConfiguration{
+_, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), &admissionregistrationv1.MutatingWebhookConfiguration{
 ObjectMeta: metav1.ObjectMeta{Name: "mutationv1.integration.test"},
-Webhooks: []admissionv1.MutatingWebhook{
+Webhooks: []admissionregistrationv1.MutatingWebhook{
 {
 Name: "mutationv1.integration.test",
-ClientConfig: admissionv1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1.WebhookClientConfig{
 URL: &endpoint,
 CABundle: localhostCert,
 },
-Rules: []admissionv1.RuleWithOperations{{
+Rules: []admissionregistrationv1.RuleWithOperations{{
-Operations: []admissionv1.OperationType{admissionv1.OperationAll},
+Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.OperationAll},
-Rule: admissionv1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
+Rule: admissionregistrationv1.Rule{APIGroups: []string{"*"}, APIVersions: []string{"*"}, Resources: []string{"*/*"}},
 }},
 FailurePolicy: &fail,
 AdmissionReviewVersions: []string{"v1", "v1beta1"},
@@ -1702,7 +1700,7 @@ func createV1MutationWebhook(client clientset.Interface, endpoint, convertedEndp
 },
 {
 Name: "mutationv1.integration.testconversion",
-ClientConfig: admissionv1.WebhookClientConfig{
+ClientConfig: admissionregistrationv1.WebhookClientConfig{
 URL: &convertedEndpoint,
 CABundle: localhostCert,
 },
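In the admission webhook tests above, the redundant spellings were the admissionv1 and admissionv1beta1 aliases, which pointed at the same paths as the admissionregistrationv1 and admissionregistrationv1beta1 imports the files already had; the hunks keep one alias per path and rewrite every reference, including the webhook configuration names. A minimal standalone sketch of the surviving alias (not taken from the repository):

// Minimal sketch, not part of the commit: one alias per import path is enough
// to build the webhook registration rules used throughout the test.
package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	// admissionv1 "k8s.io/api/admissionregistration/v1" // removed duplicate alias
)

func main() {
	rule := admissionregistrationv1.RuleWithOperations{
		Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.OperationAll},
		Rule: admissionregistrationv1.Rule{
			APIGroups:   []string{"*"},
			APIVersions: []string{"*"},
			Resources:   []string{"*/*"},
		},
	}
	fmt.Printf("%+v\n", rule)
}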
@@ -34,7 +34,6 @@ import (
 "k8s.io/api/admission/v1beta1"
 admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -160,7 +159,7 @@ plugins:

 upCh := recorder.Reset()
 ns := "load-balance"
-_, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
+_, err = client.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
 if err != nil {
 t.Fatal(err)
 }
@@ -293,7 +292,7 @@ var clientAuthMarkerFixture = &corev1.Pod{
 Name: "marker",
 },
 Spec: corev1.PodSpec{
-Containers: []v1.Container{{
+Containers: []corev1.Container{{
 Name: "fake-name",
 Image: "fakeimage",
 }},
@@ -33,7 +33,6 @@ import (
 "k8s.io/api/admission/v1beta1"
 admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/wait"
@@ -109,7 +108,7 @@ func TestWebhookLoadBalance(t *testing.T) {

 upCh := recorder.Reset()
 ns := "load-balance"
-_, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
+_, err = client.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
 if err != nil {
 t.Fatal(err)
 }
@@ -163,7 +162,7 @@ func TestWebhookLoadBalance(t *testing.T) {
 GenerateName: "loadbalance-",
 },
 Spec: corev1.PodSpec{
-Containers: []v1.Container{{
+Containers: []corev1.Container{{
 Name: "fake-name",
 Image: "fakeimage",
 }},
@@ -287,7 +286,7 @@ var loadBalanceMarkerFixture = &corev1.Pod{
 Name: "marker",
 },
 Spec: corev1.PodSpec{
-Containers: []v1.Container{{
+Containers: []corev1.Container{{
 Name: "fake-name",
 Image: "fakeimage",
 }},
@@ -36,7 +36,6 @@ import (
 "k8s.io/api/admission/v1beta1"
 admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 schedulingv1 "k8s.io/api/scheduling/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -319,7 +318,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) {
 testCaseID := strconv.Itoa(i)
 ns := "reinvoke-" + testCaseID
 nsLabels := map[string]string{"test-case": testCaseID}
-_, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: nsLabels}}, metav1.CreateOptions{})
+_, err = client.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns, Labels: nsLabels}}, metav1.CreateOptions{})
 if err != nil {
 t.Fatal(err)
 }
@@ -327,7 +326,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) {
 // Write markers to a separate namespace to avoid cross-talk
 markerNs := ns + "-markers"
 markerNsLabels := map[string]string{"test-markers": testCaseID}
-_, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: markerNs, Labels: markerNsLabels}}, metav1.CreateOptions{})
+_, err = client.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: markerNs, Labels: markerNsLabels}}, metav1.CreateOptions{})
 if err != nil {
 t.Fatal(err)
 }
@@ -414,7 +413,7 @@ func testWebhookReinvocationPolicy(t *testing.T, watchCache bool) {
 Labels: map[string]string{"x": "true"},
 },
 Spec: corev1.PodSpec{
-Containers: []v1.Container{{
+Containers: []corev1.Container{{
 Name: "fake-name",
 Image: "fakeimage",
 }},
@@ -637,7 +636,7 @@ func newReinvocationMarkerFixture(namespace string) *corev1.Pod {
 },
 },
 Spec: corev1.PodSpec{
-Containers: []v1.Container{{
+Containers: []corev1.Container{{
 Name: "fake-name",
 Image: "fakeimage",
 }},
@@ -34,7 +34,6 @@ import (
 "k8s.io/api/admission/v1beta1"
 admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 corev1 "k8s.io/api/core/v1"
-v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -190,7 +189,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) {
 t.Run(tt.name, func(t *testing.T) {
 recorder.Reset()
 ns := fmt.Sprintf("reinvoke-%d", i)
-_, err = client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
+_, err = client.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
 if err != nil {
 t.Fatal(err)
 }
@@ -290,7 +289,7 @@ func testWebhookTimeout(t *testing.T, watchCache bool) {
 Labels: map[string]string{"x": "true"},
 },
 Spec: corev1.PodSpec{
-Containers: []v1.Container{{
+Containers: []corev1.Container{{
 Name: "fake-name",
 Image: "fakeimage",
 }},
@@ -476,7 +475,7 @@ var timeoutMarkerFixture = &corev1.Pod{
 Name: "marker",
 },
 Spec: corev1.PodSpec{
-Containers: []v1.Container{{
+Containers: []corev1.Container{{
 Name: "fake-name",
 Image: "fakeimage",
 }},
@@ -40,7 +40,6 @@ import (
 yamlutil "k8s.io/apimachinery/pkg/util/yaml"
 genericfeatures "k8s.io/apiserver/pkg/features"
 utilfeature "k8s.io/apiserver/pkg/util/feature"
-"k8s.io/client-go/kubernetes"
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -2552,7 +2551,7 @@ func BenchmarkServerSideApply(b *testing.B) {
 benchAll(b, client, decodePod(podBytesWhenEnabled))
 }

-func benchAll(b *testing.B, client kubernetes.Interface, pod v1.Pod) {
+func benchAll(b *testing.B, client clientset.Interface, pod v1.Pod) {
 // Make sure pod is ready to post
 pod.ObjectMeta.CreationTimestamp = metav1.Time{}
 pod.ObjectMeta.ResourceVersion = ""
@@ -2580,7 +2579,7 @@ func benchAll(b *testing.B, client kubernetes.Interface, pod v1.Pod) {
 b.Run("Post50", benchPostPod(client, pod, 50))
 }

-func benchPostPod(client kubernetes.Interface, pod v1.Pod, parallel int) func(*testing.B) {
+func benchPostPod(client clientset.Interface, pod v1.Pod, parallel int) func(*testing.B) {
 return func(b *testing.B) {
 b.ResetTimer()
 b.ReportAllocs()
@@ -2610,7 +2609,7 @@ func benchPostPod(client kubernetes.Interface, pod v1.Pod, parallel int) func(*t
 }
 }

-func createNamespace(client kubernetes.Interface, name string) error {
+func createNamespace(client clientset.Interface, name string) error {
 namespace := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
 namespaceBytes, err := yaml.Marshal(namespace)
 if err != nil {
@@ -2626,7 +2625,7 @@ func createNamespace(client kubernetes.Interface, name string) error {
 return nil
 }

-func benchListPod(client kubernetes.Interface, pod v1.Pod, num int) func(*testing.B) {
+func benchListPod(client clientset.Interface, pod v1.Pod, num int) func(*testing.B) {
 return func(b *testing.B) {
 namespace := fmt.Sprintf("get-%d-%d", num, b.N)
 if err := createNamespace(client, namespace); err != nil {
@@ -2661,7 +2660,7 @@ func benchListPod(client kubernetes.Interface, pod v1.Pod, num int) func(*testin
 }
 }

-func benchRepeatedUpdate(client kubernetes.Interface, podName string) func(*testing.B) {
+func benchRepeatedUpdate(client clientset.Interface, podName string) func(*testing.B) {
 return func(b *testing.B) {
 b.ResetTimer()
 b.ReportAllocs()
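The server-side apply benchmark hunks above drop the bare "k8s.io/client-go/kubernetes" import in favor of the existing clientset alias; both names denote the same package, so helpers such as benchAll and createNamespace can change their parameter type from kubernetes.Interface to clientset.Interface with no behavioral difference. A minimal standalone sketch under that assumption; listPods is a hypothetical helper and the fake clientset is used only to keep the example self-contained:

// Minimal sketch, not part of the commit: clientset.Interface and
// kubernetes.Interface are the same type, reached through different aliases.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// listPods (hypothetical helper) compiles against clientset.Interface exactly
// as it would against kubernetes.Interface.
func listPods(client clientset.Interface, namespace string) error {
	pods, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(pods.Items))
	return nil
}

func main() {
	// fake.NewSimpleClientset satisfies clientset.Interface.
	if err := listPods(fake.NewSimpleClientset(), "default"); err != nil {
		fmt.Println(err)
	}
}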
@@ -30,7 +30,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/informers"
-"k8s.io/client-go/kubernetes"
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
@@ -152,7 +151,7 @@ const (
 timeout = 5 * time.Second
 )

-func waitForCertificateRequestApproved(client kubernetes.Interface, name string) error {
+func waitForCertificateRequestApproved(client clientset.Interface, name string) error {
 if err := wait.Poll(interval, timeout, func() (bool, error) {
 csr, err := client.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), name, metav1.GetOptions{})
 if err != nil {
@@ -168,7 +167,7 @@ func waitForCertificateRequestApproved(client kubernetes.Interface, name string)
 return nil
 }

-func ensureCertificateRequestNotApproved(client kubernetes.Interface, name string) error {
+func ensureCertificateRequestNotApproved(client clientset.Interface, name string) error {
 // If waiting for the CSR to be approved times out, we class this as 'not auto approved'.
 // There is currently no way to explicitly check if the CSR has been rejected for auto-approval.
 err := waitForCertificateRequestApproved(client, name)
@@ -40,7 +40,6 @@ import (
 "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
 auditinternal "k8s.io/apiserver/pkg/apis/audit"
 auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
-"k8s.io/client-go/kubernetes"
 clientset "k8s.io/client-go/kubernetes"
 kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 "k8s.io/kubernetes/test/integration/framework"
@@ -258,7 +257,7 @@ func runTestWithVersion(t *testing.T, version string) {
 framework.SharedEtcd())
 defer result.TearDownFn()

-kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
+kubeclient, err := clientset.NewForConfig(result.ClientConfig)
 if err != nil {
 t.Fatalf("Unexpected error: %v", err)
 }
@@ -391,7 +390,7 @@ func runTestWithVersion(t *testing.T, version string) {
 }
 }

-func testAudit(t *testing.T, version string, level auditinternal.Level, enableMutatingWebhook bool, namespace string, kubeclient kubernetes.Interface, logFile *os.File) {
+func testAudit(t *testing.T, version string, level auditinternal.Level, enableMutatingWebhook bool, namespace string, kubeclient clientset.Interface, logFile *os.File) {
 var lastMissingReport string
 createNamespace(t, kubeclient, namespace)

@@ -419,7 +418,7 @@ func testAudit(t *testing.T, version string, level auditinternal.Level, enableMu
 }
 }

-func testAuditCrossGroupSubResource(t *testing.T, version string, expEvents []utils.AuditEvent, namespace string, kubeclient kubernetes.Interface, logFile *os.File) {
+func testAuditCrossGroupSubResource(t *testing.T, version string, expEvents []utils.AuditEvent, namespace string, kubeclient clientset.Interface, logFile *os.File) {
 var (
 lastMissingReport string
 sa *apiv1.ServiceAccount
@@ -526,7 +525,7 @@ func getExpectedEvents(level auditinternal.Level, enableMutatingWebhook bool, na
 // configMapOperations is a set of known operations performed on the configmap type
 // which correspond to the expected events.
 // This is shared by the dynamic test
-func configMapOperations(t *testing.T, kubeclient kubernetes.Interface, namespace string) {
+func configMapOperations(t *testing.T, kubeclient clientset.Interface, namespace string) {
 // create, get, watch, update, patch, list and delete configmap.
 configMap := &apiv1.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
@@ -571,7 +570,7 @@ func configMapOperations(t *testing.T, kubeclient kubernetes.Interface, namespac
 expectNoError(t, err, "failed to delete audit-configmap")
 }

-func tokenRequestOperations(t *testing.T, kubeClient kubernetes.Interface, namespace, name string) {
+func tokenRequestOperations(t *testing.T, kubeClient clientset.Interface, namespace, name string) {
 var (
 treq = &authenticationv1.TokenRequest{
 Spec: authenticationv1.TokenRequestSpec{
@@ -584,7 +583,7 @@ func tokenRequestOperations(t *testing.T, kubeClient kubernetes.Interface, names
 expectNoError(t, err, "failed to create audit-tokenRequest")
 }

-func scaleOperations(t *testing.T, kubeClient kubernetes.Interface, namespace, name string) {
+func scaleOperations(t *testing.T, kubeClient clientset.Interface, namespace, name string) {
 var (
 scale = &autoscalingv1.Scale{
 ObjectMeta: metav1.ObjectMeta{
@ -24,13 +24,11 @@ import (
 	"time"

 	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
-	"k8s.io/client-go/kubernetes"
 	clientset "k8s.io/client-go/kubernetes"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	"k8s.io/kubernetes/pkg/controller/endpoint"

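Every import hunk in this commit follows the shape shown above: the same package path was imported twice under two names, one of the names is dropped, and the surviving alias is used everywhere. As a reminder of why this slips in at all, here is a minimal, standalone Go sketch (standard library only, a hypothetical file that is not part of this commit): the compiler only requires that each imported name be distinct and used, so one path under two names compiles without complaint.

// dup_alias.go — illustrative only; hypothetical example, not code from this commit.
package main

import (
	"fmt"
	format "fmt" // same package as above under a second name; legal as long as both names are used
)

func main() {
	fmt.Println("called through the default package name")
	format.Println("called through the extra alias")
}

The cleanup in this commit is the manual equivalent of deleting one of those two import lines and rewriting the call sites to the remaining name.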
@ -285,7 +283,7 @@ func TestEndpointSliceMirroringUpdates(t *testing.T) {
 		{
 			testName: "Update addresses",
 			tweakEndpoint: func(ep *corev1.Endpoints) {
-				ep.Subsets[0].Addresses = []v1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}}
+				ep.Subsets[0].Addresses = []corev1.EndpointAddress{{IP: "1.2.3.4"}, {IP: "1.2.3.6"}}
 			},
 		},
 	}

@ -530,7 +528,7 @@ func TestEndpointSliceMirroringSelectorTransition(t *testing.T) {
 	}
 }

-func waitForMirroredSlices(t *testing.T, client *kubernetes.Clientset, nsName, svcName string, num int) error {
+func waitForMirroredSlices(t *testing.T, client *clientset.Clientset, nsName, svcName string, num int) error {
 	t.Helper()
 	return wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
 		lSelector := discovery.LabelServiceName + "=" + svcName

@ -28,7 +28,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/admission"
 	"k8s.io/client-go/informers"
-	"k8s.io/client-go/kubernetes"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"

@ -124,7 +123,7 @@ func TestTaintBasedEvictions(t *testing.T) {
 	// Build clientset and informers for controllers.
 	externalClientConfig := restclient.CopyConfig(testCtx.KubeConfig)
 	externalClientConfig.QPS = -1
-	externalClientset := kubernetes.NewForConfigOrDie(externalClientConfig)
+	externalClientset := clientset.NewForConfigOrDie(externalClientConfig)
 	externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
 	podTolerations.SetExternalKubeClientSet(externalClientset)
 	podTolerations.SetExternalKubeInformerFactory(externalInformers)

@ -34,7 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
-	"k8s.io/client-go/kubernetes"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/klog/v2"

@ -623,7 +622,7 @@ func TestPodPriorityResolution(t *testing.T) {
 	// Build clientset and informers for controllers.
 	externalClientConfig := restclient.CopyConfig(testCtx.KubeConfig)
 	externalClientConfig.QPS = -1
-	externalClientset := kubernetes.NewForConfigOrDie(externalClientConfig)
+	externalClientset := clientset.NewForConfigOrDie(externalClientConfig)
 	externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
 	admission.SetExternalKubeClientSet(externalClientset)
 	admission.SetExternalKubeInformerFactory(externalInformers)

@ -50,7 +50,6 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/controller"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
-	"k8s.io/kubernetes/pkg/features"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 	serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"

@ -103,7 +102,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
 }

 func TestServiceAccountTokenAutoCreate(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LegacyServiceAccountTokenNoAutoGeneration, false)()
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, kubefeatures.LegacyServiceAccountTokenNoAutoGeneration, false)()
 	c, _, stopFunc, err := startServiceAccountTestServerAndWaitForCaches(t)
 	defer stopFunc()
 	if err != nil {
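Duplicates like the ones removed above can also be surfaced mechanically rather than by eye. The following is an illustrative sketch, not a tool used by this commit: a small checker built only on the Go standard library's go/parser that reports any file importing the same package path more than once. The file name and invocation are made up for the example.

// dupimports.go — illustrative sketch, not part of this commit: list Go files
// that import the same package path more than once (typically under different names).
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"strconv"
)

func main() {
	for _, path := range os.Args[1:] {
		fset := token.NewFileSet()
		// Parsing only the import declarations is enough for this check.
		f, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			continue
		}
		count := map[string]int{}
		for _, imp := range f.Imports {
			p, _ := strconv.Unquote(imp.Path.Value) // import paths are quoted literals in the AST
			count[p]++
		}
		for p, n := range count {
			if n > 1 {
				fmt.Printf("%s: %q imported %d times\n", path, p, n)
			}
		}
	}
}

Run against a set of files, e.g. go run dupimports.go $(git ls-files '*.go'), it would flag the same import pairs that this cleanup removes by hand.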