Merge pull request #109639 from Abirdcfly/fixduplicateimport

cleanup: remove all duplicate import
Authored by Kubernetes Prow Robot on 2022-07-18 16:55:23 -07:00; committed by GitHub
63 changed files with 447 additions and 518 deletions
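The pattern cleaned up across these files is the same import path pulled in twice under different names, with all references then consolidated onto the surviving name. A minimal standalone sketch of that pattern (the package name "example" and the isPending helper are invented for illustration, not taken from the diff):

package example

import (
	// Before the cleanup a file would also carry the redundant line
	//   corev1 "k8s.io/api/core/v1"
	// which imports the same path a second time under another name.
	v1 "k8s.io/api/core/v1"
)

// isPending shows the style after the cleanup: every reference goes through
// the single remaining name, so the extra alias can simply be deleted.
func isPending(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodPending
}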


@@ -23,7 +23,6 @@ import (
 	discovery "k8s.io/api/discovery/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/controller/util/endpoint"
-	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
 	utilpointer "k8s.io/utils/pointer"
 )
@@ -33,8 +32,8 @@ func TestNumEndpointsAndSlices(t *testing.T) {
 	p80 := int32(80)
 	p443 := int32(443)
-	pmKey80443 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
-	pmKey80 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})
+	pmKey80443 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
+	pmKey80 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})
 	spCacheEfficient := NewServicePortCache()
 	spCacheEfficient.Set(pmKey80, EfficiencyInfo{Endpoints: 45, Slices: 1})
@@ -66,8 +65,8 @@ func TestPlaceHolderSlice(t *testing.T) {
 	p80 := int32(80)
 	p443 := int32(443)
-	pmKey80443 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
-	pmKey80 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})
+	pmKey80443 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
+	pmKey80 := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})
 	sp := NewServicePortCache()
 	sp.Set(pmKey80, EfficiencyInfo{Endpoints: 0, Slices: 1})
@@ -95,7 +94,7 @@ func benchmarkUpdateServicePortCache(b *testing.B, num int) {
 	ns := "benchmark"
 	httpKey := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: utilpointer.Int32Ptr(80)}})
 	httpsKey := endpoint.NewPortMapKey([]discovery.EndpointPort{{Port: utilpointer.Int32Ptr(443)}})
-	spCache := &ServicePortCache{items: map[endpointutil.PortMapKey]EfficiencyInfo{
+	spCache := &ServicePortCache{items: map[endpoint.PortMapKey]EfficiencyInfo{
 		httpKey: {
 			Endpoints: 182,
 			Slices: 2,


@@ -20,8 +20,7 @@ import (
 	"fmt"
 	"time"
-	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -33,7 +32,7 @@ import (
 	"k8s.io/klog/v2"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	api "k8s.io/kubernetes/pkg/apis/core"
-	helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/apis/discovery/validation"
 	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
 	"k8s.io/kubernetes/pkg/features"
@@ -41,7 +40,7 @@ import (
 )
 // podToEndpoint returns an Endpoint object generated from a Pod, a Node, and a Service for a particular addressType.
-func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service, addressType discovery.AddressType) discovery.Endpoint {
+func podToEndpoint(pod *v1.Pod, node *v1.Node, service *v1.Service, addressType discovery.AddressType) discovery.Endpoint {
 	serving := podutil.IsPodReady(pod)
 	terminating := pod.DeletionTimestamp != nil
 	// For compatibility reasons, "ready" should never be "true" if a pod is terminatng, unless
@@ -52,7 +51,7 @@ func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service,
 		Conditions: discovery.EndpointConditions{
 			Ready: &ready,
 		},
-		TargetRef: &corev1.ObjectReference{
+		TargetRef: &v1.ObjectReference{
 			Kind: "Pod",
 			Namespace: pod.ObjectMeta.Namespace,
 			Name: pod.ObjectMeta.Name,
@@ -69,8 +68,8 @@ func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service,
 		ep.NodeName = &pod.Spec.NodeName
 	}
-	if node != nil && node.Labels[corev1.LabelTopologyZone] != "" {
-		zone := node.Labels[corev1.LabelTopologyZone]
+	if node != nil && node.Labels[v1.LabelTopologyZone] != "" {
+		zone := node.Labels[v1.LabelTopologyZone]
 		ep.Zone = &zone
 	}
@@ -83,7 +82,7 @@ func podToEndpoint(pod *corev1.Pod, node *corev1.Node, service *corev1.Service,
 // getEndpointPorts returns a list of EndpointPorts generated from a Service
 // and Pod.
-func getEndpointPorts(service *corev1.Service, pod *corev1.Pod) []discovery.EndpointPort {
+func getEndpointPorts(service *v1.Service, pod *v1.Pod) []discovery.EndpointPort {
 	endpointPorts := []discovery.EndpointPort{}
 	// Allow headless service not to have ports.
@@ -115,7 +114,7 @@ func getEndpointPorts(service *corev1.Service, pod *corev1.Pod) []discovery.Endp
 }
 // getEndpointAddresses returns a list of addresses generated from a pod status.
-func getEndpointAddresses(podStatus corev1.PodStatus, service *corev1.Service, addressType discovery.AddressType) []string {
+func getEndpointAddresses(podStatus v1.PodStatus, service *v1.Service, addressType discovery.AddressType) []string {
 	addresses := []string{}
 	for _, podIP := range podStatus.PodIPs {
@@ -134,7 +133,7 @@ func getEndpointAddresses(podStatus corev1.PodStatus, service *corev1.Service, a
 // newEndpointSlice returns an EndpointSlice generated from a service and
 // endpointMeta.
-func newEndpointSlice(service *corev1.Service, endpointMeta *endpointMeta) *discovery.EndpointSlice {
+func newEndpointSlice(service *v1.Service, endpointMeta *endpointMeta) *discovery.EndpointSlice {
 	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
 	ownerRef := metav1.NewControllerRef(service, gvk)
 	epSlice := &discovery.EndpointSlice{
@@ -166,7 +165,7 @@ func getEndpointSlicePrefix(serviceName string) string {
 // ownedBy returns true if the provided EndpointSlice is owned by the provided
 // Service.
-func ownedBy(endpointSlice *discovery.EndpointSlice, svc *corev1.Service) bool {
+func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
 	for _, o := range endpointSlice.OwnerReferences {
 		if o.UID == svc.UID && o.Kind == "Service" && o.APIVersion == "v1" {
 			return true
@@ -222,9 +221,9 @@ func addTriggerTimeAnnotation(endpointSlice *discovery.EndpointSlice, triggerTim
 	}
 	if !triggerTime.IsZero() {
-		endpointSlice.Annotations[corev1.EndpointsLastChangeTriggerTime] = triggerTime.UTC().Format(time.RFC3339Nano)
+		endpointSlice.Annotations[v1.EndpointsLastChangeTriggerTime] = triggerTime.UTC().Format(time.RFC3339Nano)
 	} else { // No new trigger time, clear the annotation.
-		delete(endpointSlice.Annotations, corev1.EndpointsLastChangeTriggerTime)
+		delete(endpointSlice.Annotations, v1.EndpointsLastChangeTriggerTime)
 	}
 }
@@ -244,7 +243,7 @@ func serviceControllerKey(endpointSlice *discovery.EndpointSlice) (string, error
 // setEndpointSliceLabels returns a map with the new endpoint slices labels and true if there was an update.
 // Slices labels must be equivalent to the Service labels except for the reserved IsHeadlessService, LabelServiceName and LabelManagedBy labels
 // Changes to IsHeadlessService, LabelServiceName and LabelManagedBy labels on the Service do not result in updates to EndpointSlice labels.
-func setEndpointSliceLabels(epSlice *discovery.EndpointSlice, service *corev1.Service) (map[string]string, bool) {
+func setEndpointSliceLabels(epSlice *discovery.EndpointSlice, service *v1.Service) (map[string]string, bool) {
 	updated := false
 	epLabels := make(map[string]string)
 	svcLabels := make(map[string]string)
@@ -308,7 +307,7 @@ func (sl endpointSliceEndpointLen) Less(i, j int) bool {
 }
 // returns a map of address types used by a service
-func getAddressTypesForService(service *corev1.Service) map[discovery.AddressType]struct{} {
+func getAddressTypesForService(service *v1.Service) map[discovery.AddressType]struct{} {
 	serviceSupportedAddresses := make(map[discovery.AddressType]struct{})
 	// TODO: (khenidak) when address types are removed in favor of
 	// v1.IPFamily this will need to be removed, and work directly with
@@ -317,11 +316,11 @@ func getAddressTypesForService(service *corev1.Service) map[discovery.AddressTyp
 	// IMPORTANT: we assume that IP of (discovery.AddressType enum) is never in use
 	// as it gets deprecated
 	for _, family := range service.Spec.IPFamilies {
-		if family == corev1.IPv4Protocol {
+		if family == v1.IPv4Protocol {
 			serviceSupportedAddresses[discovery.AddressTypeIPv4] = struct{}{}
 		}
-		if family == corev1.IPv6Protocol {
+		if family == v1.IPv6Protocol {
 			serviceSupportedAddresses[discovery.AddressTypeIPv6] = struct{}{}
 		}
 	}
@@ -345,7 +344,7 @@ func getAddressTypesForService(service *corev1.Service) map[discovery.AddressTyp
 	// this ensures that traffic is not disrupted until then. But *may*
 	// include undesired families for headless services until then.
-	if len(service.Spec.ClusterIP) > 0 && service.Spec.ClusterIP != corev1.ClusterIPNone { // headfull
+	if len(service.Spec.ClusterIP) > 0 && service.Spec.ClusterIP != v1.ClusterIPNone { // headfull
 		addrType := discovery.AddressTypeIPv4
 		if utilnet.IsIPv6String(service.Spec.ClusterIP) {
 			addrType = discovery.AddressTypeIPv6
@@ -385,9 +384,9 @@ func unchangedSlices(existingSlices, slicesToUpdate, slicesToDelete []*discovery
 }
 // hintsEnabled returns true if the provided annotations include a
-// corev1.AnnotationTopologyAwareHints key with a value set to "Auto" or "auto".
+// v1.AnnotationTopologyAwareHints key with a value set to "Auto" or "auto".
 func hintsEnabled(annotations map[string]string) bool {
-	val, ok := annotations[corev1.AnnotationTopologyAwareHints]
+	val, ok := annotations[v1.AnnotationTopologyAwareHints]
 	if !ok {
 		return false
 	}
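For context on the hintsEnabled hunk above: topology-aware hints are opted into per Service through an annotation on the Service object. The sketch below paraphrases that check as a standalone program; the hintsEnabledSketch name and the parts of the body outside the changed lines are assumptions for illustration, not copied from the file.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// hintsEnabledSketch mirrors the helper shown above: the annotation value
// must be exactly "Auto" or "auto" for hints to be treated as enabled.
func hintsEnabledSketch(annotations map[string]string) bool {
	val, ok := annotations[v1.AnnotationTopologyAwareHints]
	if !ok {
		return false
	}
	return val == "Auto" || val == "auto"
}

func main() {
	svcAnnotations := map[string]string{v1.AnnotationTopologyAwareHints: "auto"}
	fmt.Println(hintsEnabledSketch(svcAnnotations)) // true
}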


@@ -21,8 +21,7 @@ import (
 	"testing"
 	"github.com/stretchr/testify/assert"
-	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -54,12 +53,12 @@ func TestNewEndpointSlice(t *testing.T) {
 	testCases := []struct {
 		name string
-		tweakEndpoint func(ep *corev1.Endpoints)
+		tweakEndpoint func(ep *v1.Endpoints)
 		expectedSlice discovery.EndpointSlice
 	}{
 		{
 			name: "create slice from endpoints",
-			tweakEndpoint: func(ep *corev1.Endpoints) {
+			tweakEndpoint: func(ep *v1.Endpoints) {
 			},
 			expectedSlice: discovery.EndpointSlice{
 				ObjectMeta: metav1.ObjectMeta{
@@ -79,7 +78,7 @@ func TestNewEndpointSlice(t *testing.T) {
 		},
 		{
 			name: "create slice from endpoints with annotations",
-			tweakEndpoint: func(ep *corev1.Endpoints) {
+			tweakEndpoint: func(ep *v1.Endpoints) {
 				annotations := map[string]string{"foo": "bar"}
 				ep.Annotations = annotations
 			},
@@ -101,7 +100,7 @@ func TestNewEndpointSlice(t *testing.T) {
 		},
 		{
 			name: "create slice from endpoints with labels",
-			tweakEndpoint: func(ep *corev1.Endpoints) {
+			tweakEndpoint: func(ep *v1.Endpoints) {
 				labels := map[string]string{"foo": "bar"}
 				ep.Labels = labels
 			},
@@ -124,7 +123,7 @@ func TestNewEndpointSlice(t *testing.T) {
 		},
 		{
 			name: "create slice from endpoints with labels and annotations",
-			tweakEndpoint: func(ep *corev1.Endpoints) {
+			tweakEndpoint: func(ep *v1.Endpoints) {
 				labels := map[string]string{"foo": "bar"}
 				ep.Labels = labels
 				annotations := map[string]string{"foo2": "bar2"}
@@ -149,12 +148,12 @@ func TestNewEndpointSlice(t *testing.T) {
 		},
 		{
 			name: "create slice from endpoints with labels and annotations triggertime",
-			tweakEndpoint: func(ep *corev1.Endpoints) {
+			tweakEndpoint: func(ep *v1.Endpoints) {
 				labels := map[string]string{"foo": "bar"}
 				ep.Labels = labels
 				annotations := map[string]string{
-					"foo2": "bar2",
-					corev1.EndpointsLastChangeTriggerTime: "date",
+					"foo2": "bar2",
+					v1.EndpointsLastChangeTriggerTime: "date",
 				}
 				ep.Annotations = annotations
 			},
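The test above uses a table-driven layout in which each case carries a tweak closure that mutates a freshly built base object, so only the per-case differences are spelled out. A generic sketch of that pattern, with made-up names and no Kubernetes types:

package example

import "testing"

// TestTweakPattern sketches the same table-driven "tweak" layout: build a base
// value, let the case's closure adjust it, then assert on the result.
func TestTweakPattern(t *testing.T) {
	base := func() map[string]string { return map[string]string{"kind": "Endpoints"} }
	cases := []struct {
		name  string
		tweak func(m map[string]string)
		want  int
	}{
		{name: "no tweak", tweak: func(m map[string]string) {}, want: 1},
		{name: "add label", tweak: func(m map[string]string) { m["foo"] = "bar" }, want: 2},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			m := base()
			tc.tweak(m)
			if len(m) != tc.want {
				t.Errorf("got %d entries, want %d", len(m), tc.want)
			}
		})
	}
}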


@@ -35,7 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/discovery"
-	clientset "k8s.io/client-go/kubernetes"
+	clientset "k8s.io/client-go/kubernetes" // import known versions
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/tools/cache"
@@ -47,9 +46,6 @@ import (
 	c "k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/apis/config/scheme"
 	"k8s.io/kubernetes/pkg/controller/garbagecollector/metrics"
-	// import known versions
-	_ "k8s.io/client-go/kubernetes"
 )
 // ResourceResyncTime defines the resync period of the garbage collector's informers.
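The blank import removed above existed only to trigger the client-go package's init-time registration ("import known versions"); once the same package is imported normally under the clientset name, that side effect happens anyway, so the underscore import is redundant. A small standalone sketch of the idea (the example package and the blank variable at the end are illustrative, not from the diff):

package example

import (
	// A blank import such as
	//   _ "k8s.io/client-go/kubernetes"
	// runs the package's init functions without binding a name. It is only
	// needed when the package is not imported normally; the named import
	// below already pulls in the same init side effects.
	clientset "k8s.io/client-go/kubernetes" // import known versions
)

// Referencing the package keeps the named import in use.
var _ clientset.Interface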


@@ -28,7 +28,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/klog/v2"
@@ -592,7 +591,7 @@ func (ao ascendingOrdinal) Less(i, j int) bool {
 // Note that API validation has already guaranteed the maxUnavailable field to be >1 if it is an integer
 // or 0% < value <= 100% if it is a percentage, so we don't have to consider other cases.
 func getStatefulSetMaxUnavailable(maxUnavailable *intstr.IntOrString, replicaCount int) (int, error) {
-	maxUnavailableNum, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(1)), replicaCount, false)
+	maxUnavailableNum, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(maxUnavailable, intstr.FromInt(1)), replicaCount, false)
 	if err != nil {
 		return 0, err
 	}
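A worked example of the consolidated intstr call in getStatefulSetMaxUnavailable, as a standalone sketch (the replica count and percentage are arbitrary; only the function names come from the hunk above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// 25% of 10 replicas, rounded down (the false argument), as in
	// getStatefulSetMaxUnavailable above: 2.5 truncates to 2.
	pct := intstr.FromString("25%")
	n, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(&pct, intstr.FromInt(1)), 10, false)
	fmt.Println(n, err) // 2 <nil>

	// A nil maxUnavailable falls back to the default of 1.
	m, _ := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(nil, intstr.FromInt(1)), 10, false)
	fmt.Println(m) // 1
}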


@@ -34,7 +34,6 @@ import (
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
-	kcache "k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/component-helpers/storage/ephemeral"
@@ -57,13 +56,13 @@ type ephemeralController struct {
 	// objects from the API server. It is shared with other controllers and
 	// therefore the PVC objects in its store should be treated as immutable.
 	pvcLister corelisters.PersistentVolumeClaimLister
-	pvcsSynced kcache.InformerSynced
+	pvcsSynced cache.InformerSynced
 	// podLister is the shared Pod lister used to fetch Pod
 	// objects from the API server. It is shared with other controllers and
 	// therefore the Pod objects in its store should be treated as immutable.
 	podLister corelisters.PodLister
-	podSynced kcache.InformerSynced
+	podSynced cache.InformerSynced
 	// podIndexer has the common PodPVC indexer indexer installed To
 	// limit iteration over pods to those of interest.
@@ -98,7 +97,7 @@ func NewController(
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 	ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ephemeral_volume"})
-	podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
+	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: ec.enqueuePod,
 		// The pod spec is immutable. Therefore the controller can ignore pod updates
 		// because there cannot be any changes that have to be copied into the generated
@@ -106,7 +105,7 @@ func NewController(
 		// Deletion of the PVC is handled through the owner reference and garbage collection.
 		// Therefore pod deletions also can be ignored.
 	})
-	pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
+	pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		DeleteFunc: ec.onPVCDelete,
 	})
 	if err := common.AddPodPVCIndexerIfNotPresent(ec.podIndexer); err != nil {
@@ -130,7 +129,7 @@ func (ec *ephemeralController) enqueuePod(obj interface{}) {
 	for _, vol := range pod.Spec.Volumes {
 		if vol.Ephemeral != nil {
 			// It has at least one ephemeral inline volume, work on it.
-			key, err := kcache.DeletionHandlingMetaNamespaceKeyFunc(pod)
+			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod)
 			if err != nil {
 				runtime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", pod, err))
 				return
@@ -208,7 +207,7 @@ func (ec *ephemeralController) processNextWorkItem(ctx context.Context) bool {
 // syncHandler is invoked for each pod which might need to be processed.
 // If an error is returned from this function, the pod will be requeued.
 func (ec *ephemeralController) syncHandler(ctx context.Context, key string) error {
-	namespace, name, err := kcache.SplitMetaNamespaceKey(key)
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
 		return err
 	}
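The cache calls renamed above implement the usual work-queue keying: the event handlers enqueue a "namespace/name" string and syncHandler splits it back apart. A standalone sketch of that round trip (the pod metadata here is made up):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "my-app"}}

	// The informer event handlers enqueue a "namespace/name" string...
	key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(pod)
	fmt.Println(key) // kube-system/my-app

	// ...and syncHandler splits it back into its two parts.
	ns, name, _ := cache.SplitMetaNamespaceKey(key)
	fmt.Println(ns, name) // kube-system my-app
}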


@@ -33,7 +33,6 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
-	kcache "k8s.io/client-go/tools/cache"
 	"k8s.io/component-base/metrics/testutil"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
@@ -213,7 +212,7 @@ func makePod(name, namespace string, uid types.UID, volumes ...v1.Volume) *v1.Po
 }
 func podKey(pod *v1.Pod) string {
-	key, _ := kcache.DeletionHandlingMetaNamespaceKeyFunc(testPodWithEphemeral)
+	key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(testPodWithEphemeral)
 	return key
 }


@@ -40,7 +40,6 @@ import (
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
-	kcache "k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	cloudprovider "k8s.io/cloud-provider"
@@ -80,10 +79,10 @@ type expandController struct {
 	// objects from the API server. It is shared with other controllers and
 	// therefore the PVC objects in its store should be treated as immutable.
 	pvcLister corelisters.PersistentVolumeClaimLister
-	pvcsSynced kcache.InformerSynced
+	pvcsSynced cache.InformerSynced
 	pvLister corelisters.PersistentVolumeLister
-	pvSynced kcache.InformerSynced
+	pvSynced cache.InformerSynced
 	// cloud provider used by volume host
 	cloud cloudprovider.Interface
@@ -145,7 +144,7 @@ func NewExpandController(
 		expc.recorder,
 		blkutil)
-	pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
+	pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: expc.enqueuePVC,
 		UpdateFunc: func(old, new interface{}) {
 			oldPVC, ok := old.(*v1.PersistentVolumeClaim)
@@ -181,7 +180,7 @@ func (expc *expandController) enqueuePVC(obj interface{}) {
 	}
 	if pvc.Status.Phase == v1.ClaimBound {
-		key, err := kcache.DeletionHandlingMetaNamespaceKeyFunc(pvc)
+		key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pvc)
 		if err != nil {
 			runtime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", pvc, err))
 			return
@@ -212,7 +211,7 @@ func (expc *expandController) processNextWorkItem(ctx context.Context) bool {
 // syncHandler performs actual expansion of volume. If an error is returned
 // from this function - PVC will be requeued for resizing.
 func (expc *expandController) syncHandler(ctx context.Context, key string) error {
-	namespace, name, err := kcache.SplitMetaNamespaceKey(key)
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
 		return err
 	}


@@ -45,7 +45,6 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
 	"k8s.io/kubernetes/pkg/volume"
-	vol "k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
 )
@@ -226,7 +225,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
 	params := ControllerParameters{
 		KubeClient: kubeClient,
 		SyncPeriod: 5 * time.Second,
-		VolumePlugins: []vol.VolumePlugin{},
+		VolumePlugins: []volume.VolumePlugin{},
 		VolumeInformer: informerFactory.Core().V1().PersistentVolumes(),
 		ClaimInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
 		ClassInformer: informerFactory.Storage().V1().StorageClasses(),
@@ -614,7 +613,7 @@ func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error,
 			deleteCalls: expectedDeleteCalls,
 			provisionCalls: expectedProvisionCalls,
 		}
-		ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, nil /* prober */, ctrl)
+		ctrl.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plugin}, nil /* prober */, ctrl)
 		return toWrap(ctrl, reactor, test)
 	}
 }
@@ -657,7 +656,7 @@ func (t fakeCSIMigratedPluginManager) IsMigrationEnabledForPlugin(pluginName str
 func wrapTestWithCSIMigrationProvisionCalls(toWrap testCall) testCall {
 	plugin := &mockVolumePlugin{}
 	return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
-		ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, nil /* prober */, ctrl)
+		ctrl.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plugin}, nil /* prober */, ctrl)
 		ctrl.translator = fakeCSINameTranslator{}
 		ctrl.csiMigratedPluginManager = fakeCSIMigratedPluginManager{}
 		return toWrap(ctrl, reactor, test)
@@ -924,7 +923,7 @@ type mockVolumePlugin struct {
 	deleteCallCounter int
 	recycleCalls []error
 	recycleCallCounter int
-	provisionOptions vol.VolumeOptions
+	provisionOptions volume.VolumeOptions
 }
 type provisionCall struct {
@@ -932,12 +931,12 @@ type provisionCall struct {
 	ret error
 }
-var _ vol.VolumePlugin = &mockVolumePlugin{}
-var _ vol.RecyclableVolumePlugin = &mockVolumePlugin{}
-var _ vol.DeletableVolumePlugin = &mockVolumePlugin{}
-var _ vol.ProvisionableVolumePlugin = &mockVolumePlugin{}
+var _ volume.VolumePlugin = &mockVolumePlugin{}
+var _ volume.RecyclableVolumePlugin = &mockVolumePlugin{}
+var _ volume.DeletableVolumePlugin = &mockVolumePlugin{}
+var _ volume.ProvisionableVolumePlugin = &mockVolumePlugin{}
-func (plugin *mockVolumePlugin) Init(host vol.VolumeHost) error {
+func (plugin *mockVolumePlugin) Init(host volume.VolumeHost) error {
 	return nil
 }
@@ -945,11 +944,11 @@ func (plugin *mockVolumePlugin) GetPluginName() string {
 	return mockPluginName
 }
-func (plugin *mockVolumePlugin) GetVolumeName(spec *vol.Spec) (string, error) {
+func (plugin *mockVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
 	return spec.Name(), nil
 }
-func (plugin *mockVolumePlugin) CanSupport(spec *vol.Spec) bool {
+func (plugin *mockVolumePlugin) CanSupport(spec *volume.Spec) bool {
 	return true
 }
@@ -965,21 +964,21 @@ func (plugin *mockVolumePlugin) SupportsBulkVolumeVerification() bool {
 	return false
 }
-func (plugin *mockVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol.Spec, error) {
+func (plugin *mockVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
 	return nil, nil
 }
-func (plugin *mockVolumePlugin) NewMounter(spec *vol.Spec, podRef *v1.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
+func (plugin *mockVolumePlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 	return nil, fmt.Errorf("Mounter is not supported by this plugin")
 }
-func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol.Unmounter, error) {
+func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (volume.Unmounter, error) {
 	return nil, fmt.Errorf("Unmounter is not supported by this plugin")
 }
 // Provisioner interfaces
-func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) {
+func (plugin *mockVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
 	if len(plugin.provisionCalls) > 0 {
 		// mockVolumePlugin directly implements Provisioner interface
 		klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
@@ -1033,7 +1032,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
 // Deleter interfaces
-func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) {
+func (plugin *mockVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
 	if len(plugin.deleteCalls) > 0 {
 		// mockVolumePlugin directly implements Deleter interface
 		klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
@@ -1059,13 +1058,13 @@ func (plugin *mockVolumePlugin) GetPath() string {
 	return ""
 }
-func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) {
+func (plugin *mockVolumePlugin) GetMetrics() (*volume.Metrics, error) {
 	return nil, nil
 }
 // Recycler interfaces
-func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
+func (plugin *mockVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
 	if len(plugin.recycleCalls) == 0 {
 		return fmt.Errorf("Mock plugin error: no recycleCalls configured")
 	}
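The var _ volume.VolumePlugin = &mockVolumePlugin{} lines touched above are compile-time interface assertions. A self-contained sketch of the same idiom using only the standard library (the buf type is invented for illustration):

package example

import "io"

// buf is a small type that implements io.Writer.
type buf struct{ data []byte }

func (b *buf) Write(p []byte) (int, error) {
	b.data = append(b.data, p...)
	return len(p), nil
}

// This mirrors the blank assignments in the hunk above: the line costs nothing
// at runtime but breaks the build if *buf ever stops satisfying io.Writer.
var _ io.Writer = &buf{}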