Fix package aliases to follow Go convention

Pengfei Ni 2016-11-30 15:27:27 +08:00
parent 27e62180e0
commit f584ed4398
106 changed files with 1318 additions and 1318 deletions
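Every change in this commit is the same mechanical rename: import aliases written in mixedCaps (dnsConfig, kubeCache) or snake_case (federation_v1beta1, pkg_runtime) become all-lowercase (dnsconfig, kubecache, federationv1beta1, pkgruntime), matching the Go convention that package names, and hence import aliases, are short, all lowercase, with no underscores or mixedCaps. A minimal sketch of the pattern, using import paths that appear in the diff below (the wrapper file itself is hypothetical):

```go
package example

import (
	// Before this commit: aliases used mixedCaps and snake_case, e.g.
	//   dnsConfig "k8s.io/kubernetes/pkg/dns/config"
	//   federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"

	// After this commit: aliases are all lowercase, like package names.
	dnsconfig "k8s.io/kubernetes/pkg/dns/config"
	federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
)

// Call sites pick up the new aliases; only the qualifier changes,
// never the referenced identifier:
var (
	_ dnsconfig.Config          // was dnsConfig.Config
	_ federationv1beta1.Cluster // was federation_v1beta1.Cluster
)
```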

View File

@@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/pkg/client/restclient"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kdns "k8s.io/kubernetes/pkg/dns"
-dnsConfig "k8s.io/kubernetes/pkg/dns/config"
+dnsconfig "k8s.io/kubernetes/pkg/dns/config"
"k8s.io/kubernetes/pkg/runtime/schema"
)
@@ -58,15 +58,15 @@ func NewKubeDNSServerDefault(config *options.KubeDNSConfig) *KubeDNSServer {
ks.dnsBindAddress = config.DNSBindAddress
ks.dnsPort = config.DNSPort
-var configSync dnsConfig.Sync
+var configSync dnsconfig.Sync
if config.ConfigMap == "" {
glog.V(0).Infof("ConfigMap not configured, using values from command line flags")
-configSync = dnsConfig.NewNopSync(
-&dnsConfig.Config{Federations: config.Federations})
+configSync = dnsconfig.NewNopSync(
+&dnsconfig.Config{Federations: config.Federations})
} else {
glog.V(0).Infof("Using configuration read from ConfigMap: %v:%v",
config.ConfigMapNs, config.ConfigMap)
-configSync = dnsConfig.NewSync(
+configSync = dnsconfig.NewSync(
kubeClient, config.ConfigMapNs, config.ConfigMap)
}

View File

@@ -19,13 +19,13 @@ package cache
import (
"github.com/golang/glog"
"k8s.io/kubernetes/federation/apis/federation/v1beta1"
-kubeCache "k8s.io/kubernetes/pkg/client/cache"
+kubecache "k8s.io/kubernetes/pkg/client/cache"
)
// StoreToClusterLister makes a Store have the List method of the unversioned.ClusterInterface
// The Store must contain (only) clusters.
type StoreToClusterLister struct {
-kubeCache.Store
+kubecache.Store
}
func (s *StoreToClusterLister) List() (clusters v1beta1.ClusterList, err error) {
@@ -41,7 +41,7 @@ type ClusterConditionPredicate func(cluster v1beta1.Cluster) bool
// storeToClusterConditionLister filters and returns nodes matching the given type and status from the store.
type storeToClusterConditionLister struct {
-store kubeCache.Store
+store kubecache.Store
predicate ClusterConditionPredicate
}

View File

@@ -21,8 +21,8 @@ import (
"time"
"github.com/golang/glog"
-federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
-cluster_cache "k8s.io/kubernetes/federation/client/cache"
+federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+clustercache "k8s.io/kubernetes/federation/client/cache"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
@@ -43,14 +43,14 @@ type ClusterController struct {
// clusterMonitorPeriod is the period for updating status of cluster
clusterMonitorPeriod time.Duration
// clusterClusterStatusMap is a mapping of clusterName and cluster status of last sampling
-clusterClusterStatusMap map[string]federation_v1beta1.ClusterStatus
+clusterClusterStatusMap map[string]federationv1beta1.ClusterStatus
// clusterKubeClientMap is a mapping of clusterName and restclient
clusterKubeClientMap map[string]ClusterClient
// cluster framework and store
clusterController *cache.Controller
-clusterStore cluster_cache.StoreToClusterLister
+clusterStore clustercache.StoreToClusterLister
}
// NewclusterController returns a new cluster controller
@@ -59,7 +59,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
knownClusterSet: make(sets.String),
federationClient: federationClient,
clusterMonitorPeriod: clusterMonitorPeriod,
-clusterClusterStatusMap: make(map[string]federation_v1beta1.ClusterStatus),
+clusterClusterStatusMap: make(map[string]federationv1beta1.ClusterStatus),
clusterKubeClientMap: make(map[string]ClusterClient),
}
cc.clusterStore.Store, cc.clusterController = cache.NewInformer(
@@ -71,7 +71,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
return cc.federationClient.Federation().Clusters().Watch(options)
},
},
-&federation_v1beta1.Cluster{},
+&federationv1beta1.Cluster{},
controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{
DeleteFunc: cc.delFromClusterSet,
@@ -84,7 +84,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
// delFromClusterSet deletes a cluster from clusterSet and
// deletes the corresponding restclient from the map clusterKubeClientMap
func (cc *ClusterController) delFromClusterSet(obj interface{}) {
-cluster := obj.(*federation_v1beta1.Cluster)
+cluster := obj.(*federationv1beta1.Cluster)
cc.knownClusterSet.Delete(cluster.Name)
delete(cc.clusterKubeClientMap, cluster.Name)
}
@@ -92,7 +92,7 @@ func (cc *ClusterController) delFromClusterSet(obj interface{}) {
// addToClusterSet inserts the new cluster into clusterSet and creates a corresponding
// restclient to map clusterKubeClientMap
func (cc *ClusterController) addToClusterSet(obj interface{}) {
-cluster := obj.(*federation_v1beta1.Cluster)
+cluster := obj.(*federationv1beta1.Cluster)
cc.knownClusterSet.Insert(cluster.Name)
// create the restclient of cluster
restClient, err := NewClusterClientSet(cluster)
@@ -115,7 +115,7 @@ func (cc *ClusterController) Run() {
}, cc.clusterMonitorPeriod, wait.NeverStop)
}
-func (cc *ClusterController) GetClusterStatus(cluster *federation_v1beta1.Cluster) (*federation_v1beta1.ClusterStatus, error) {
+func (cc *ClusterController) GetClusterStatus(cluster *federationv1beta1.Cluster) (*federationv1beta1.ClusterStatus, error) {
// just get the status of cluster, by requesting the restapi "/healthz"
clusterClient, found := cc.clusterKubeClientMap[cluster.Name]
if !found {

View File

@@ -23,9 +23,9 @@ import (
"net/http/httptest"
"testing"
-federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
-controller_util "k8s.io/kubernetes/federation/pkg/federation-controller/util"
+controllerutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
@@ -35,15 +35,15 @@ import (
"k8s.io/kubernetes/pkg/util/uuid"
)
-func newCluster(clusterName string, serverUrl string) *federation_v1beta1.Cluster {
-cluster := federation_v1beta1.Cluster{
+func newCluster(clusterName string, serverUrl string) *federationv1beta1.Cluster {
+cluster := federationv1beta1.Cluster{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: clusterName,
},
-Spec: federation_v1beta1.ClusterSpec{
-ServerAddressByClientCIDRs: []federation_v1beta1.ServerAddressByClientCIDR{
+Spec: federationv1beta1.ClusterSpec{
+ServerAddressByClientCIDRs: []federationv1beta1.ServerAddressByClientCIDR{
{
ClientCIDR: "0.0.0.0/0",
ServerAddress: serverUrl,
@@ -54,13 +54,13 @@ func newCluster(clusterName string, serverUrl string) *federation_v1beta1.Cluste
return &cluster
}
-func newClusterList(cluster *federation_v1beta1.Cluster) *federation_v1beta1.ClusterList {
-clusterList := federation_v1beta1.ClusterList{
+func newClusterList(cluster *federationv1beta1.Cluster) *federationv1beta1.ClusterList {
+clusterList := federationv1beta1.ClusterList{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
ListMeta: unversioned.ListMeta{
SelfLink: "foobar",
},
-Items: []federation_v1beta1.Cluster{},
+Items: []federationv1beta1.Cluster{},
}
clusterList.Items = append(clusterList.Items, *cluster)
return &clusterList
@@ -68,7 +68,7 @@ func newClusterList(cluster *federation_v1beta1.Cluster) *federation_v1beta1.Clu
// init a fake http handler that simulates a federation apiserver responding to "DELETE", "PUT", "GET" and "UPDATE";
// when "canBeGotten" is false, the user cannot get the cluster from the apiserver
-func createHttptestFakeHandlerForFederation(clusterList *federation_v1beta1.ClusterList, canBeGotten bool) *http.HandlerFunc {
+func createHttptestFakeHandlerForFederation(clusterList *federationv1beta1.ClusterList, canBeGotten bool) *http.HandlerFunc {
fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
clusterListString, _ := json.Marshal(*clusterList)
w.Header().Set("Content-Type", "application/json")
@@ -125,8 +125,8 @@ func TestUpdateClusterStatusOK(t *testing.T) {
federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller"))
// Override KubeconfigGetterForCluster to avoid having to setup service accounts and mount files with secret tokens.
-originalGetter := controller_util.KubeconfigGetterForCluster
-controller_util.KubeconfigGetterForCluster = func(c *federation_v1beta1.Cluster) clientcmd.KubeconfigGetter {
+originalGetter := controllerutil.KubeconfigGetterForCluster
+controllerutil.KubeconfigGetterForCluster = func(c *federationv1beta1.Cluster) clientcmd.KubeconfigGetter {
return func() (*clientcmdapi.Config, error) {
return &clientcmdapi.Config{}, nil
}
@@ -141,11 +141,11 @@ func TestUpdateClusterStatusOK(t *testing.T) {
if !found {
t.Errorf("Failed to Update Cluster Status")
} else {
-if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federation_v1beta1.ClusterOffline) {
+if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federationv1beta1.ClusterOffline) {
t.Errorf("Failed to Update Cluster Status")
}
}
// Reset KubeconfigGetterForCluster
-controller_util.KubeconfigGetterForCluster = originalGetter
+controllerutil.KubeconfigGetterForCluster = originalGetter
}

View File

@@ -19,17 +19,17 @@ package configmap
import (
"time"
-federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api"
-api_v1 "k8s.io/kubernetes/pkg/api/v1"
+apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
-pkg_runtime "k8s.io/kubernetes/pkg/runtime"
+pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
@@ -79,7 +79,7 @@ type ConfigMapController struct {
func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
-recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-configmaps-controller"})
+recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-configmaps-controller"})
configmapcontroller := &ConfigMapController{
federatedApiClient: client,
@@ -98,43 +98,43 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont
// Start informer on federated API servers on configmaps that should be federated.
configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer(
&cache.ListWatch{
-ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
-return client.Core().ConfigMaps(api_v1.NamespaceAll).List(options)
+ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
+return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
},
-WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
-return client.Core().ConfigMaps(api_v1.NamespaceAll).Watch(options)
+WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
+return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
},
},
-&api_v1.ConfigMap{},
+&apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(),
-util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))
+util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))
// Federated informer on configmaps in members of federation.
configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer(
client,
-func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
+func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
-ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
-return targetClient.Core().ConfigMaps(api_v1.NamespaceAll).List(options)
+ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
+return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
},
-WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
-return targetClient.Core().ConfigMaps(api_v1.NamespaceAll).Watch(options)
+WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
+return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
},
},
-&api_v1.ConfigMap{},
+&apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some configmap operation succeeded.
util.NewTriggerOnAllChanges(
-func(obj pkg_runtime.Object) {
+func(obj pkgruntime.Object) {
configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
-ClusterAvailable: func(cluster *federation_api.Cluster) {
+ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the configmaps again.
configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
},
@@ -143,19 +143,19 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont
// Federated updater along with Create/Update/Delete operations.
configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer,
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
-configmap := obj.(*api_v1.ConfigMap)
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
+configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap)
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
-configmap := obj.(*api_v1.ConfigMap)
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
+configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap)
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
-configmap := obj.(*api_v1.ConfigMap)
-err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &api_v1.DeleteOptions{})
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
+configmap := obj.(*apiv1.ConfigMap)
+err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
return err
})
return configmapcontroller
@@ -179,7 +179,7 @@ func (configmapcontroller *ConfigMapController) Run(stopChan <-chan struct{}) {
}
func (configmapcontroller *ConfigMapController) deliverConfigMapObj(obj interface{}, delay time.Duration, failed bool) {
-configmap := obj.(*api_v1.ConfigMap)
+configmap := obj.(*apiv1.ConfigMap)
configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name}, delay, failed)
}
@@ -220,7 +220,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMapsOnClusterChan
configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
}
for _, obj := range configmapcontroller.configmapInformerStore.List() {
-configmap := obj.(*api_v1.ConfigMap)
+configmap := obj.(*apiv1.ConfigMap)
configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name},
configmapcontroller.smallDelay, false)
}
@@ -247,7 +247,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap typ
glog.V(8).Infof("Skipping not federated config map: %s", key)
return
}
-baseConfigMap := baseConfigMapObj.(*api_v1.ConfigMap)
+baseConfigMap := baseConfigMapObj.(*apiv1.ConfigMap)
clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters()
if err != nil {
@@ -266,7 +266,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap typ
}
// Do not modify data.
-desiredConfigMap := &api_v1.ConfigMap{
+desiredConfigMap := &apiv1.ConfigMap{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta),
Data: baseConfigMap.Data,
}
@@ -281,7 +281,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap typ
ClusterName: cluster.Name,
})
} else {
-clusterConfigMap := clusterConfigMapObj.(*api_v1.ConfigMap)
+clusterConfigMap := clusterConfigMapObj.(*apiv1.ConfigMap)
// Update existing configmap, if needed.
if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) {

View File

@@ -21,13 +21,13 @@ import (
"testing"
"time"
-federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
-fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
+federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
-api_v1 "k8s.io/kubernetes/pkg/api/v1"
+apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
-fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
@@ -36,29 +36,29 @@ import (
)
func TestConfigMapController(t *testing.T) {
-cluster1 := NewCluster("cluster1", api_v1.ConditionTrue)
-cluster2 := NewCluster("cluster2", api_v1.ConditionTrue)
+cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
+cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
-fakeClient := &fake_fedclientset.Clientset{}
-RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}})
-RegisterFakeList("configmaps", &fakeClient.Fake, &api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}})
+fakeClient := &fakefedclientset.Clientset{}
+RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
+RegisterFakeList("configmaps", &fakeClient.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
configmapWatch := RegisterFakeWatch("configmaps", &fakeClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
-cluster1Client := &fake_kubeclientset.Clientset{}
+cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
-RegisterFakeList("configmaps", &cluster1Client.Fake, &api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}})
+RegisterFakeList("configmaps", &cluster1Client.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster1Client.Fake, cluster1Watch)
cluster1UpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster1Client.Fake, cluster1Watch)
-cluster2Client := &fake_kubeclientset.Clientset{}
+cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
-RegisterFakeList("configmaps", &cluster2Client.Fake, &api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}})
+RegisterFakeList("configmaps", &cluster2Client.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster2Client.Fake, cluster2Watch)
configmapController := NewConfigMapController(fakeClient)
informer := ToFederatedInformerForTestOnly(configmapController.configmapFederatedInformer)
-informer.SetClientFactory(func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) {
+informer.SetClientFactory(func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
@@ -77,8 +77,8 @@ func TestConfigMapController(t *testing.T) {
stop := make(chan struct{})
configmapController.Run(stop)
-configmap1 := &api_v1.ConfigMap{
-ObjectMeta: api_v1.ObjectMeta{
+configmap1 := &apiv1.ConfigMap{
+ObjectMeta: apiv1.ObjectMeta{
Name: "test-configmap",
Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/configmaps/test-configmap",
@@ -136,7 +136,7 @@ func TestConfigMapController(t *testing.T) {
close(stop)
}
-func GetConfigMapFromChan(c chan runtime.Object) *api_v1.ConfigMap {
-configmap := GetObjectFromChan(c).(*api_v1.ConfigMap)
+func GetConfigMapFromChan(c chan runtime.Object) *apiv1.ConfigMap {
+configmap := GetObjectFromChan(c).(*apiv1.ConfigMap)
return configmap
}

View File

@@ -21,21 +21,21 @@ import (
"reflect"
"time"
-federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
-api_v1 "k8s.io/kubernetes/pkg/api/v1"
+apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion"
-pkg_runtime "k8s.io/kubernetes/pkg/runtime"
+pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
@@ -87,7 +87,7 @@ type DaemonSetController struct {
func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
-recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-daemonset-controller"})
+recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-daemonset-controller"})
daemonsetcontroller := &DaemonSetController{
federatedApiClient: client,
@@ -106,28 +106,28 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
// Start informer in federated API servers on daemonsets that should be federated.
daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer(
&cache.ListWatch{
-ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
-return client.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
+ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
+return client.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
},
-WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
-return client.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
+WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
+return client.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.DaemonSet{},
controller.NoResyncPeriodFunc(),
-util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))
+util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))
// Federated informer on daemonsets in members of federation.
daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer(
client,
-func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
+func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
-ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
-return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
+ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
+return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
},
-WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
-return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
+WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
+return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.DaemonSet{},
@@ -135,14 +135,14 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some daemonset operation succeeded.
util.NewTriggerOnAllChanges(
-func(obj pkg_runtime.Object) {
+func(obj pkgruntime.Object) {
daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
-ClusterAvailable: func(cluster *federation_api.Cluster) {
+ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the daemonsets again.
daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
},
@@ -151,7 +151,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
// Federated updater along with Create/Update/Delete operations.
daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer,
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset)
@@ -162,7 +162,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
}
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
@@ -173,10 +173,10 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
}
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
-err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &api_v1.DeleteOptions{})
+err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &apiv1.DeleteOptions{})
if err != nil {
glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {
@@ -190,7 +190,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
daemonsetcontroller.removeFinalizerFunc,
daemonsetcontroller.addFinalizerFunc,
// objNameFunc
-func(obj pkg_runtime.Object) string {
+func(obj pkgruntime.Object) string {
daemonset := obj.(*extensionsv1.DaemonSet)
return daemonset.Name
},
@@ -204,7 +204,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
}
// Returns true if the given object has the given finalizer in its ObjectMeta.
-func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer string) bool {
+func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
daemonset := obj.(*extensionsv1.DaemonSet)
for i := range daemonset.ObjectMeta.Finalizers {
if string(daemonset.ObjectMeta.Finalizers[i]) == finalizer {
@@ -216,7 +216,7 @@ func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkg_runtime
// Removes the finalizer from the given objects ObjectMeta.
// Assumes that the given object is a daemonset.
-func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) {
+func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
daemonset := obj.(*extensionsv1.DaemonSet)
newFinalizers := []string{}
hasFinalizer := false
@@ -241,7 +241,7 @@ func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkg_runt
// Adds the given finalizer to the given objects ObjectMeta.
// Assumes that the given object is a daemonset.
-func (daemonsetcontroller *DaemonSetController) addFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) {
+func (daemonsetcontroller *DaemonSetController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
daemonset := obj.(*extensionsv1.DaemonSet)
daemonset.ObjectMeta.Finalizers = append(daemonset.ObjectMeta.Finalizers, finalizer)
daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)

View File

@@ -22,16 +22,16 @@ import (
"testing"
"time"
-federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
-fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
+federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
//"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/unversioned"
-api_v1 "k8s.io/kubernetes/pkg/api/v1"
+apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
-fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@@ -39,30 +39,30 @@ import (
)
func TestDaemonSetController(t *testing.T) {
-cluster1 := NewCluster("cluster1", api_v1.ConditionTrue)
-cluster2 := NewCluster("cluster2", api_v1.ConditionTrue)
+cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
+cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
-fakeClient := &fake_fedclientset.Clientset{}
-RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}})
+fakeClient := &fakefedclientset.Clientset{}
+RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("daemonsets", &fakeClient.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
daemonsetWatch := RegisterFakeWatch("daemonsets", &fakeClient.Fake)
// daemonsetUpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &fakeClient.Fake, daemonsetWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
-cluster1Client := &fake_kubeclientset.Clientset{}
+cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("daemonsets", &cluster1Client.Fake)
RegisterFakeList("daemonsets", &cluster1Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &cluster1Client.Fake, cluster1Watch)
-cluster2Client := &fake_kubeclientset.Clientset{}
+cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("daemonsets", &cluster2Client.Fake)
RegisterFakeList("daemonsets", &cluster2Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster2Client.Fake, cluster2Watch)
daemonsetController := NewDaemonSetController(fakeClient)
informer := ToFederatedInformerForTestOnly(daemonsetController.daemonsetFederatedInformer)
-informer.SetClientFactory(func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) {
+informer.SetClientFactory(func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
@@ -82,7 +82,7 @@ func TestDaemonSetController(t *testing.T) {
daemonsetController.Run(stop)
daemonset1 := extensionsv1.DaemonSet{
-ObjectMeta: api_v1.ObjectMeta{
+ObjectMeta: apiv1.ObjectMeta{
Name: "test-daemonset",
Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/daemonsets/test-daemonset",
@@ -102,7 +102,7 @@ func TestDaemonSetController(t *testing.T) {
updatedDaemonSet := GetDaemonSetFromChan(daemonsetUpdateChan)
assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedDaemonSet = GetDaemonSetFromChan(daemonsetUpdateChan)
-assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, api_v1.FinalizerOrphan))
+assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, apiv1.FinalizerOrphan))
daemonset1 = *updatedDaemonSet
*/
createdDaemonSet := GetDaemonSetFromChan(cluster1CreateChan)

View File

@@ -23,13 +23,13 @@ import (
"time"
fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
-fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
+fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/meta"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
-fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
@@ -83,19 +83,19 @@ func TestDeploymentController(t *testing.T) {
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
-fakeClient := &fake_fedclientset.Clientset{}
+fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &fedv1.ClusterList{Items: []fedv1.Cluster{*cluster1}})
deploymentsWatch := RegisterFakeWatch("deployments", &fakeClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
-cluster1Client := &fake_kubeclientset.Clientset{}
+cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("deployments", &cluster1Client.Fake)
_ = RegisterFakeWatch("pods", &cluster1Client.Fake)
RegisterFakeList("deployments", &cluster1Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("deployments", &cluster1Client.Fake, cluster1Watch)
cluster1UpdateChan := RegisterFakeCopyOnUpdate("deployments", &cluster1Client.Fake, cluster1Watch)
-cluster2Client := &fake_kubeclientset.Clientset{}
+cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("deployments", &cluster2Client.Fake)
_ = RegisterFakeWatch("pods", &cluster2Client.Fake)
RegisterFakeList("deployments", &cluster2Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}})

View File

@@ -21,7 +21,7 @@ import (
"sync"
"time"
-federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
@@ -29,13 +29,13 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
-extensions_v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion"
-pkg_runtime "k8s.io/kubernetes/pkg/runtime"
+pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
@@ -134,17 +134,17 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Start informer in federated API servers on ingresses that should be federated.
ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
&cache.ListWatch{
-ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
+ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return client.Extensions().Ingresses(api.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
},
},
-&extensions_v1beta1.Ingress{},
+&extensionsv1beta1.Ingress{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(
-func(obj pkg_runtime.Object) {
+func(obj pkgruntime.Object) {
ic.deliverIngressObj(obj, 0, false)
},
))
@@ -152,29 +152,29 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated informer on ingresses in members of federation.
ic.ingressFederatedInformer = util.NewFederatedInformer(
client,
-func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
+func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
-ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
+ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
},
},
-&extensions_v1beta1.Ingress{},
+&extensionsv1beta1.Ingress{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some ingress operation succeeded.
util.NewTriggerOnAllChanges(
-func(obj pkg_runtime.Object) {
+func(obj pkgruntime.Object) {
ic.deliverIngressObj(obj, ic.ingressReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
-ClusterAvailable: func(cluster *federation_api.Cluster) {
+ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the ingresses again, and configure its ingress controller's configmap with the correct UID
ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
},
@@ -184,11 +184,11 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated informer on configmaps for ingress controllers in members of the federation.
ic.configMapFederatedInformer = util.NewFederatedInformer(
client,
-func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
+func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
return cache.NewInformer(
&cache.ListWatch{
-ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
+ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
if targetClient == nil {
glog.Errorf("Internal error: targetClient is nil")
}
@@ -206,14 +206,14 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Trigger reconciliation whenever the ingress controller's configmap in a federated cluster is changed. In most cases it
// would be just confirmation that the configmap for the ingress controller is correct.
util.NewTriggerOnAllChanges(
-func(obj pkg_runtime.Object) {
+func(obj pkgruntime.Object) {
ic.deliverConfigMapObj(cluster.Name, obj, ic.configMapReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
-ClusterAvailable: func(cluster *federation_api.Cluster) {
+ClusterAvailable: func(cluster *federationapi.Cluster) {
ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
},
},
@@ -221,8 +221,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated ingress updater along with Create/Update/Delete operations.
ic.federatedIngressUpdater = util.NewFederatedUpdater(ic.ingressFederatedInformer,
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
-ingress := obj.(*extensions_v1beta1.Ingress)
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
+ingress := obj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Attempting to create Ingress: %v", ingress)
_, err := client.Extensions().Ingresses(ingress.Namespace).Create(ingress)
if err != nil {
@@ -232,8 +232,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
}
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
-ingress := obj.(*extensions_v1beta1.Ingress)
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
+ingress := obj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Attempting to update Ingress: %v", ingress)
_, err := client.Extensions().Ingresses(ingress.Namespace).Update(ingress)
if err != nil {
@@ -243,8 +243,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
}
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
-ingress := obj.(*extensions_v1beta1.Ingress)
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
+ingress := obj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &v1.DeleteOptions{})
return err
@@ -252,14 +252,14 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated configmap updater along with Create/Update/Delete operations. Only Update should ever be called.
ic.federatedConfigMapUpdater = util.NewFederatedUpdater(ic.configMapFederatedInformer,
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configMap := obj.(*v1.ConfigMap)
configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
glog.Errorf("Internal error: Incorrectly attempting to create ConfigMap: %q", configMapName)
_, err := client.Core().ConfigMaps(configMap.Namespace).Create(configMap)
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configMap := obj.(*v1.ConfigMap)
configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
glog.V(4).Infof("Attempting to update ConfigMap: %v", configMap)
@@ -271,7 +271,7 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
}
return err
},
-func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
+func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configMap := obj.(*v1.ConfigMap)
configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
glog.Errorf("Internal error: Incorrectly attempting to delete ConfigMap: %q", configMapName)
@@ -284,8 +284,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
ic.removeFinalizerFunc,
ic.addFinalizerFunc,
// objNameFunc
-func(obj pkg_runtime.Object) string {
-ingress := obj.(*extensions_v1beta1.Ingress)
+func(obj pkgruntime.Object) string {
+ingress := obj.(*extensionsv1beta1.Ingress)
return ingress.Name
},
ic.updateTimeout,
@@ -297,8 +297,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
}
// Returns true if the given object has the given finalizer in its ObjectMeta.
-func (ic *IngressController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer string) bool {
-ingress := obj.(*extensions_v1beta1.Ingress)
+func (ic *IngressController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
+ingress := obj.(*extensionsv1beta1.Ingress)
for i := range ingress.ObjectMeta.Finalizers {
if string(ingress.ObjectMeta.Finalizers[i]) == finalizer {
return true
@@ -309,8 +309,8 @@ func (ic *IngressController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer
// Removes the finalizer from the given objects ObjectMeta.
// Assumes that the given object is a ingress.
-func (ic *IngressController) removeFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) {
-ingress := obj.(*extensions_v1beta1.Ingress)
+func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+ingress := obj.(*extensionsv1beta1.Ingress)
newFinalizers := []string{}
hasFinalizer := false
for i := range ingress.ObjectMeta.Finalizers {
@@ -334,8 +334,8 @@ func (ic *IngressController) removeFinalizerFunc(obj pkg_runtime.Object, finaliz
// Adds the given finalizer to the given objects ObjectMeta.
// Assumes that the given object is a ingress.
-func (ic *IngressController) addFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) {
-ingress := obj.(*extensions_v1beta1.Ingress)
+func (ic *IngressController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
+ingress := obj.(*extensionsv1beta1.Ingress)
ingress.ObjectMeta.Finalizers = append(ingress.ObjectMeta.Finalizers, finalizer)
ingress, err := ic.federatedApiClient.Extensions().Ingresses(ingress.Namespace).Update(ingress)
if err != nil {
@@ -388,7 +388,7 @@ func (ic *IngressController) Run(stopChan <-chan struct{}) {
}
func (ic *IngressController) deliverIngressObj(obj interface{}, delay time.Duration, failed bool) {
-ingress := obj.(*extensions_v1beta1.Ingress)
+ingress := obj.(*extensionsv1beta1.Ingress)
ic.deliverIngress(types.NamespacedName{Namespace: ingress.Namespace, Name: ingress.Name}, delay, failed)
}
@@ -468,7 +468,7 @@ func (ic *IngressController) reconcileIngressesOnClusterChange(clusterName strin
}
for _, obj := range ingressList {
-ingress := obj.(*extensions_v1beta1.Ingress)
+ingress := obj.(*extensionsv1beta1.Ingress)
nsName := types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace}
glog.V(4).Infof("Delivering federated ingress %q for cluster %q", nsName, clusterName)
ic.deliverIngress(nsName, ic.smallDelay, false)
@@ -537,7 +537,7 @@ func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
In cases 2 and 3, the configmaps will be updated in the next cycle, triggered by the federation cluster update(s)
*/
-func (ic *IngressController) reconcileConfigMap(cluster *federation_api.Cluster, configMap *v1.ConfigMap) {
+func (ic *IngressController) reconcileConfigMap(cluster *federationapi.Cluster, configMap *v1.ConfigMap) {
ic.Lock() // TODO: Reduce the scope of this master election lock.
defer ic.Unlock()
@@ -580,7 +580,7 @@ func (ic *IngressController) reconcileConfigMap(cluster *federation_api.Cluster,
If there is no elected master cluster, an error is returned.
All other clusters must use the ingress UID of the elected master.
*/
-func (ic *IngressController) getMasterCluster() (master *federation_api.Cluster, ingressUID string, err error) {
+func (ic *IngressController) getMasterCluster() (master *federationapi.Cluster, ingressUID string, err error) {
clusters, err := ic.configMapFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v", err)
@@ -601,10 +601,10 @@ func (ic *IngressController) getMasterCluster() (master *federation_api.Cluster,
updateClusterIngressUIDToMasters takes the ingress UID annotation on the master cluster and applies it to cluster.
If there is no master cluster, then fallbackUID is used (and hence this cluster becomes the master).
*/
-func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federation_api.Cluster, fallbackUID string) {
+func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federationapi.Cluster, fallbackUID string) {
masterCluster, masterUID, err := ic.getMasterCluster()
clusterObj, clusterErr := conversion.NewCloner().DeepCopy(cluster) // Make a clone so that we don't clobber our input param
-cluster, ok := clusterObj.(*federation_api.Cluster)
+cluster, ok := clusterObj.(*federationapi.Cluster)
if clusterErr != nil || !ok {
glog.Errorf("Internal error: Failed clone cluster resource while attempting to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err)
return
@@ -649,7 +649,7 @@ func (ic *IngressController) isClusterReady(clusterName string) bool {
// updateAnnotationOnIngress updates the annotation with the given key on the given federated ingress.
// Queues the ingress for resync when done.
-func (ic *IngressController) updateAnnotationOnIngress(ingress *extensions_v1beta1.Ingress, key, value string) {
+func (ic *IngressController) updateAnnotationOnIngress(ingress *extensionsv1beta1.Ingress, key, value string) {
if ingress.ObjectMeta.Annotations == nil {
ingress.ObjectMeta.Annotations = make(map[string]string)
}
@@ -687,9 +687,9 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
return
}
baseIngressObj, err := conversion.NewCloner().DeepCopy(baseIngressObjFromStore)
-baseIngress, ok := baseIngressObj.(*extensions_v1beta1.Ingress)
+baseIngress, ok := baseIngressObj.(*extensionsv1beta1.Ingress)
if err != nil || !ok {
glog.Errorf("Internal Error %v : Object retrieved from ingressInformerStore with key %q is not of correct type *extensions_v1beta1.Ingress: %v", err, key, baseIngressObj)
glog.Errorf("Internal Error %v : Object retrieved from ingressInformerStore with key %q is not of correct type *extensionsv1beta1.Ingress: %v", err, key, baseIngressObj)
} else {
glog.V(4).Infof("Base (federated) ingress: %v", baseIngress)
}
@@ -714,7 +714,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
ic.deliverIngress(ingress, 0, false)
return
}
-baseIngress = updatedIngressObj.(*extensions_v1beta1.Ingress)
+baseIngress = updatedIngressObj.(*extensionsv1beta1.Ingress)
glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name)
@@ -738,7 +738,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
ic.deliverIngress(ingress, 0, true)
return
}
-desiredIngress := &extensions_v1beta1.Ingress{}
+desiredIngress := &extensionsv1beta1.Ingress{}
objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta)
if err != nil {
glog.Errorf("Error deep copying ObjectMeta: %v", err)
@@ -751,9 +751,9 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
if !ok {
glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
}
-desiredIngress.Spec = objSpec.(extensions_v1beta1.IngressSpec)
+desiredIngress.Spec = objSpec.(extensionsv1beta1.IngressSpec)
if !ok {
glog.Errorf("Internal error: Failed to cast to extensions_v1beta1.Ingressespec: %v", objSpec)
glog.Errorf("Internal error: Failed to cast to extensionsv1beta1.Ingressespec: %v", objSpec)
}
glog.V(4).Infof("Desired Ingress: %v", desiredIngress)
@@ -793,7 +793,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName)
}
} else {
-clusterIngress := clusterIngressObj.(*extensions_v1beta1.Ingress)
+clusterIngress := clusterIngressObj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name)
clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly]
baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0
@@ -893,7 +893,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
}
// delete deletes the given ingress or returns error if the deletion was not complete.
-func (ic *IngressController) delete(ingress *extensions_v1beta1.Ingress) error {
+func (ic *IngressController) delete(ingress *extensionsv1beta1.Ingress) error {
glog.V(3).Infof("Handling deletion of ingress: %v", *ingress)
_, err := ic.deletionHelper.HandleObjectInUnderlyingClusters(ingress)
if err != nil {

View File

@@ -22,17 +22,17 @@ import (
"testing"
"time"
-federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
-fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
+federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
+fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/errors"
-api_v1 "k8s.io/kubernetes/pkg/api/v1"
-extensions_v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+apiv1 "k8s.io/kubernetes/pkg/api/v1"
+extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
-fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
@@ -41,40 +41,40 @@ import (
)
func TestIngressController(t *testing.T) {
-fakeClusterList := federation_api.ClusterList{Items: []federation_api.Cluster{}}
-fakeConfigMapList1 := api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}}
-fakeConfigMapList2 := api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}}
-cluster1 := NewCluster("cluster1", api_v1.ConditionTrue)
-cluster2 := NewCluster("cluster2", api_v1.ConditionTrue)
+fakeClusterList := federationapi.ClusterList{Items: []federationapi.Cluster{}}
+fakeConfigMapList1 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
+fakeConfigMapList2 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
+cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
+cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
cfg1 := NewConfigMap("foo")
cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled.
t.Log("Creating fake infrastructure")
-fedClient := &fake_fedclientset.Clientset{}
+fedClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fedClient.Fake, &fakeClusterList)
RegisterFakeList("ingresses", &fedClient.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}})
RegisterFakeList("ingresses", &fedClient.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
fedIngressWatch := RegisterFakeWatch("ingresses", &fedClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fedClient.Fake)
fedClusterUpdateChan := RegisterFakeCopyOnUpdate("clusters", &fedClient.Fake, clusterWatch)
//fedIngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &fedClient.Fake, fedIngressWatch)
cluster1Client := &fake_kubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster1Client.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}})
cluster1Client := &fakekubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster1Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
RegisterFakeList("configmaps", &cluster1Client.Fake, &fakeConfigMapList1)
cluster1IngressWatch := RegisterFakeWatch("ingresses", &cluster1Client.Fake)
cluster1ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
cluster1IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
// cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
cluster2Client := &fake_kubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster2Client.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}})
cluster2Client := &fakekubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster2Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
RegisterFakeList("configmaps", &cluster2Client.Fake, &fakeConfigMapList2)
cluster2IngressWatch := RegisterFakeWatch("ingresses", &cluster2Client.Fake)
cluster2ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
cluster2IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster2Client.Fake, cluster2IngressWatch)
cluster2ConfigMapUpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster2Client.Fake, cluster2ConfigMapWatch)
clientFactoryFunc := func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) {
clientFactoryFunc := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
@ -102,8 +102,8 @@ func TestIngressController(t *testing.T) {
// TODO: Here we are creating the ingress with first cluster annotation.
// Add another test without that annotation when
// https://github.com/kubernetes/kubernetes/issues/36540 is fixed.
ing1 := extensions_v1beta1.Ingress{
ObjectMeta: api_v1.ObjectMeta{
ing1 := extensionsv1beta1.Ingress{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-ingress",
Namespace: "mynamespace",
SelfLink: "/api/v1/namespaces/mynamespace/ingress/test-ingress",
@ -111,9 +111,9 @@ func TestIngressController(t *testing.T) {
firstClusterAnnotation: cluster1.Name,
},
},
Status: extensions_v1beta1.IngressStatus{
LoadBalancer: api_v1.LoadBalancerStatus{
Ingress: make([]api_v1.LoadBalancerIngress, 0, 0),
Status: extensionsv1beta1.IngressStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: make([]apiv1.LoadBalancerIngress, 0, 0),
},
},
}
@ -139,7 +139,7 @@ func TestIngressController(t *testing.T) {
updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan)
assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedIngress = GetIngressFromChan(t, fedIngressUpdateChan)
assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, api_v1.FinalizerOrphan), fmt.Sprintf("ingress does not have the orphan finalizer: %v", updatedIngress))
assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, apiv1.FinalizerOrphan), fmt.Sprintf("ingress does not have the orphan finalizer: %v", updatedIngress))
ing1 = *updatedIngress
*/
t.Log("Checking that Ingress was correctly created in cluster 1")
@ -159,7 +159,7 @@ func TestIngressController(t *testing.T) {
// TODO: Re-enable this when we have fixed these flaky tests: https://github.com/kubernetes/kubernetes/issues/36540.
// Test that IP address gets transferred from cluster ingress to federated ingress.
t.Log("Checking that IP address gets transferred from cluster ingress to federated ingress")
createdIngress.Status.LoadBalancer.Ingress = append(createdIngress.Status.LoadBalancer.Ingress, api_v1.LoadBalancerIngress{IP: "1.2.3.4"})
createdIngress.Status.LoadBalancer.Ingress = append(createdIngress.Status.LoadBalancer.Ingress, apiv1.LoadBalancerIngress{IP: "1.2.3.4"})
cluster1IngressWatch.Modify(createdIngress)
// Wait for store to see the updated cluster ingress.
assert.NoError(t, WaitForStatusUpdate(t, ingressController.ingressFederatedInformer.GetTargetStore(),
@ -210,28 +210,28 @@ func TestIngressController(t *testing.T) {
close(stop)
}
func GetIngressFromChan(t *testing.T, c chan runtime.Object) *extensions_v1beta1.Ingress {
func GetIngressFromChan(t *testing.T, c chan runtime.Object) *extensionsv1beta1.Ingress {
obj := GetObjectFromChan(c)
ingress, ok := obj.(*extensions_v1beta1.Ingress)
ingress, ok := obj.(*extensionsv1beta1.Ingress)
if !ok {
t.Logf("Object on channel was not of type *extensions_v1beta1.Ingress: %v", obj)
t.Logf("Object on channel was not of type *extensionsv1beta1.Ingress: %v", obj)
}
return ingress
}
func GetConfigMapFromChan(c chan runtime.Object) *api_v1.ConfigMap {
configMap, _ := GetObjectFromChan(c).(*api_v1.ConfigMap)
func GetConfigMapFromChan(c chan runtime.Object) *apiv1.ConfigMap {
configMap, _ := GetObjectFromChan(c).(*apiv1.ConfigMap)
return configMap
}
func GetClusterFromChan(c chan runtime.Object) *federation_api.Cluster {
cluster, _ := GetObjectFromChan(c).(*federation_api.Cluster)
func GetClusterFromChan(c chan runtime.Object) *federationapi.Cluster {
cluster, _ := GetObjectFromChan(c).(*federationapi.Cluster)
return cluster
}
func NewConfigMap(uid string) *api_v1.ConfigMap {
return &api_v1.ConfigMap{
ObjectMeta: api_v1.ObjectMeta{
func NewConfigMap(uid string) *apiv1.ConfigMap {
return &apiv1.ConfigMap{
ObjectMeta: apiv1.ObjectMeta{
Name: uidConfigMapName,
Namespace: uidConfigMapNamespace,
SelfLink: "/api/v1/namespaces/" + uidConfigMapNamespace + "/configmap/" + uidConfigMapName,
@ -252,8 +252,8 @@ func WaitForFinalizersInFederationStore(ingressController *IngressController, st
if !found || err != nil {
return false, err
}
ingress := obj.(*extensions_v1beta1.Ingress)
if ingressController.hasFinalizerFunc(ingress, api_v1.FinalizerOrphan) &&
ingress := obj.(*extensionsv1beta1.Ingress)
if ingressController.hasFinalizerFunc(ingress, apiv1.FinalizerOrphan) &&
ingressController.hasFinalizerFunc(ingress, deletionhelper.FinalizerDeleteFromUnderlyingClusters) {
return true, nil
}
@ -280,14 +280,14 @@ func WaitForIngressInClusterStore(store util.FederatedReadOnlyStore, clusterName
}
// Wait for ingress status to be updated to match the desiredStatus.
func WaitForStatusUpdate(t *testing.T, store util.FederatedReadOnlyStore, clusterName, key string, desiredStatus api_v1.LoadBalancerStatus, timeout time.Duration) error {
func WaitForStatusUpdate(t *testing.T, store util.FederatedReadOnlyStore, clusterName, key string, desiredStatus apiv1.LoadBalancerStatus, timeout time.Duration) error {
retryInterval := 100 * time.Millisecond
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
obj, found, err := store.GetByKey(clusterName, key)
if !found || err != nil {
return false, err
}
ingress := obj.(*extensions_v1beta1.Ingress)
ingress := obj.(*extensionsv1beta1.Ingress)
return reflect.DeepEqual(ingress.Status.LoadBalancer, desiredStatus), nil
})
return err


@ -20,14 +20,14 @@ import (
"fmt"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
@ -84,7 +84,7 @@ type NamespaceController struct {
func NewNamespaceController(client federationclientset.Interface) *NamespaceController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-namespace-controller"})
recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-namespace-controller"})
nc := &NamespaceController{
federatedApiClient: client,
@ -103,31 +103,31 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
// Start informer in federated API servers on namespaces that should be federated.
nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (runtime.Object, error) {
ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
return client.Core().Namespaces().List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().Namespaces().Watch(options)
},
},
&api_v1.Namespace{},
&apiv1.Namespace{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj runtime.Object) { nc.deliverNamespaceObj(obj, 0, false) }))
// Federated informer on namespaces in members of federation.
nc.namespaceFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (runtime.Object, error) {
ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
return targetClient.Core().Namespaces().List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().Namespaces().Watch(options)
},
},
&api_v1.Namespace{},
&apiv1.Namespace{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some namespace operation succeeded.
@ -136,7 +136,7 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) {
ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the namespaces again.
nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay)
},
@ -146,18 +146,18 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
// Federated updater along with Create/Update/Delete operations.
nc.federatedUpdater = util.NewFederatedUpdater(nc.namespaceFederatedInformer,
func(client kubeclientset.Interface, obj runtime.Object) error {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
_, err := client.Core().Namespaces().Create(namespace)
return err
},
func(client kubeclientset.Interface, obj runtime.Object) error {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
_, err := client.Core().Namespaces().Update(namespace)
return err
},
func(client kubeclientset.Interface, obj runtime.Object) error {
namespace := obj.(*api_v1.Namespace)
err := client.Core().Namespaces().Delete(namespace.Name, &api_v1.DeleteOptions{})
namespace := obj.(*apiv1.Namespace)
err := client.Core().Namespaces().Delete(namespace.Name, &apiv1.DeleteOptions{})
// IsNotFound error is fine since that means the object is deleted already.
if errors.IsNotFound(err) {
return nil
@ -171,7 +171,7 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
nc.addFinalizerFunc,
// objNameFunc
func(obj runtime.Object) string {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
return namespace.Name
},
nc.updateTimeout,
@ -184,7 +184,7 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
// Returns true if the given object has the given finalizer in its ObjectMeta.
func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer string) bool {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
for i := range namespace.ObjectMeta.Finalizers {
if string(namespace.ObjectMeta.Finalizers[i]) == finalizer {
return true
@ -196,7 +196,7 @@ func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer st
// Removes the finalizer from the given object's ObjectMeta.
// Assumes that the given object is a namespace.
func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
newFinalizers := []string{}
hasFinalizer := false
for i := range namespace.ObjectMeta.Finalizers {
@ -221,7 +221,7 @@ func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer
// Adds the given finalizer to the given object's ObjectMeta.
// Assumes that the given object is a namespace.
func (nc *NamespaceController) addFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
namespace.ObjectMeta.Finalizers = append(namespace.ObjectMeta.Finalizers, finalizer)
namespace, err := nc.federatedApiClient.Core().Namespaces().Finalize(namespace)
if err != nil {
@ -231,8 +231,8 @@ func (nc *NamespaceController) addFinalizerFunc(obj runtime.Object, finalizer st
}
// Returns true if the given object has the given finalizer in its NamespaceSpec.
func (nc *NamespaceController) hasFinalizerFuncInSpec(obj runtime.Object, finalizer api_v1.FinalizerName) bool {
namespace := obj.(*api_v1.Namespace)
func (nc *NamespaceController) hasFinalizerFuncInSpec(obj runtime.Object, finalizer apiv1.FinalizerName) bool {
namespace := obj.(*apiv1.Namespace)
for i := range namespace.Spec.Finalizers {
if namespace.Spec.Finalizers[i] == finalizer {
return true
@ -242,8 +242,8 @@ func (nc *NamespaceController) hasFinalizerFuncInSpec(obj runtime.Object, finali
}
// Removes the finalizer from the given object's NamespaceSpec.
func (nc *NamespaceController) removeFinalizerFromSpec(namespace *api_v1.Namespace, finalizer api_v1.FinalizerName) (*api_v1.Namespace, error) {
updatedFinalizers := []api_v1.FinalizerName{}
func (nc *NamespaceController) removeFinalizerFromSpec(namespace *apiv1.Namespace, finalizer apiv1.FinalizerName) (*apiv1.Namespace, error) {
updatedFinalizers := []apiv1.FinalizerName{}
for i := range namespace.Spec.Finalizers {
if namespace.Spec.Finalizers[i] != finalizer {
updatedFinalizers = append(updatedFinalizers, namespace.Spec.Finalizers[i])
@ -275,7 +275,7 @@ func (nc *NamespaceController) Run(stopChan <-chan struct{}) {
}
func (nc *NamespaceController) deliverNamespaceObj(obj interface{}, delay time.Duration, failed bool) {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
nc.deliverNamespace(namespace.Name, delay, failed)
}
@ -314,7 +314,7 @@ func (nc *NamespaceController) reconcileNamespacesOnClusterChange() {
nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay)
}
for _, obj := range nc.namespaceInformerStore.List() {
namespace := obj.(*api_v1.Namespace)
namespace := obj.(*apiv1.Namespace)
nc.deliverNamespace(namespace.Name, nc.smallDelay, false)
}
}
@ -339,7 +339,7 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
// Create a copy before modifying the namespace to prevent a race condition with
// other readers of the namespace from the store.
namespaceObj, err := conversion.NewCloner().DeepCopy(namespaceObjFromStore)
baseNamespace, ok := namespaceObj.(*api_v1.Namespace)
baseNamespace, ok := namespaceObj.(*apiv1.Namespace)
if err != nil || !ok {
glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
nc.deliverNamespace(namespace, 0, true)
@ -368,7 +368,7 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
nc.deliverNamespace(namespace, 0, false)
return
}
baseNamespace = updatedNamespaceObj.(*api_v1.Namespace)
baseNamespace = updatedNamespaceObj.(*apiv1.Namespace)
glog.V(3).Infof("Syncing namespace %s in underlying clusters", baseNamespace.Name)
// Sync the namespace in all underlying clusters.
@ -388,9 +388,9 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
return
}
// The object should not be modified.
desiredNamespace := &api_v1.Namespace{
desiredNamespace := &apiv1.Namespace{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseNamespace.ObjectMeta),
Spec: util.DeepCopyApiTypeOrPanic(baseNamespace.Spec).(api_v1.NamespaceSpec),
Spec: util.DeepCopyApiTypeOrPanic(baseNamespace.Spec).(apiv1.NamespaceSpec),
}
glog.V(5).Infof("Desired namespace in underlying clusters: %+v", desiredNamespace)
@ -404,7 +404,7 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
ClusterName: cluster.Name,
})
} else {
clusterNamespace := clusterNamespaceObj.(*api_v1.Namespace)
clusterNamespace := clusterNamespaceObj.(*apiv1.Namespace)
// Update existing namespace, if needed.
if !util.ObjectMetaAndSpecEquivalent(desiredNamespace, clusterNamespace) {
@ -441,17 +441,17 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
}
// delete deletes the given namespace or returns error if the deletion was not complete.
func (nc *NamespaceController) delete(namespace *api_v1.Namespace) error {
func (nc *NamespaceController) delete(namespace *apiv1.Namespace) error {
// Set Terminating status.
updatedNamespace := &api_v1.Namespace{
updatedNamespace := &apiv1.Namespace{
ObjectMeta: namespace.ObjectMeta,
Spec: namespace.Spec,
Status: api_v1.NamespaceStatus{
Phase: api_v1.NamespaceTerminating,
Status: apiv1.NamespaceStatus{
Phase: apiv1.NamespaceTerminating,
},
}
var err error
if namespace.Status.Phase != api_v1.NamespaceTerminating {
if namespace.Status.Phase != apiv1.NamespaceTerminating {
glog.V(2).Infof("Marking ns %s as terminating", namespace.Name)
nc.eventRecorder.Event(namespace, api.EventTypeNormal, "DeleteNamespace", fmt.Sprintf("Marking for deletion"))
_, err = nc.federatedApiClient.Core().Namespaces().Update(updatedNamespace)
@ -460,7 +460,7 @@ func (nc *NamespaceController) delete(namespace *api_v1.Namespace) error {
}
}
if nc.hasFinalizerFuncInSpec(updatedNamespace, api_v1.FinalizerKubernetes) {
if nc.hasFinalizerFuncInSpec(updatedNamespace, apiv1.FinalizerKubernetes) {
// Delete resources in this namespace.
updatedNamespace, err = nc.removeKubernetesFinalizer(updatedNamespace)
if err != nil {
@ -488,42 +488,42 @@ func (nc *NamespaceController) delete(namespace *api_v1.Namespace) error {
}
// Ensures that all resources in this namespace are deleted and then removes the kubernetes finalizer.
func (nc *NamespaceController) removeKubernetesFinalizer(namespace *api_v1.Namespace) (*api_v1.Namespace, error) {
func (nc *NamespaceController) removeKubernetesFinalizer(namespace *apiv1.Namespace) (*apiv1.Namespace, error) {
// Right now there are just 7 types of objects: Deployments, DaemonSets, ReplicaSets, Secrets, Ingresses, Events and Services.
// Temporarily these items are simply deleted one by one to squeeze this code into 1.4.
// TODO: Make it generic (like in the regular namespace controller) and parallel.
err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{})
err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete service list: %v", err)
}
err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{})
err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete replicaset list from namespace: %v", err)
}
err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{})
err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete secret list from namespace: %v", err)
}
err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{})
err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete ingresses list from namespace: %v", err)
}
err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{})
err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete daemonsets list from namespace: %v", err)
}
err = nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{})
err = nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete deployments list from namespace: %v", err)
}
err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{})
err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to delete events list from namespace: %v", err)
}
// Remove apiv1.FinalizerKubernetes
if len(namespace.Spec.Finalizers) != 0 {
return nc.removeFinalizerFromSpec(namespace, api_v1.FinalizerKubernetes)
return nc.removeFinalizerFromSpec(namespace, apiv1.FinalizerKubernetes)
}
return namespace, nil
}
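The TODO above asks for a generic, parallel version of this cleanup. A possible table-driven sketch (an illustration only, not part of this commit; it assumes the same imports as the file above and a hypothetical helper name):

// deleteAllCollections is a hypothetical table-driven variant of the
// one-by-one deletions above; each entry wraps one DeleteCollection call.
type collectionDeleter struct {
	kind string
	fn   func() error
}

func deleteAllCollections(client federationclientset.Interface, ns string) error {
	deleters := []collectionDeleter{
		{"services", func() error {
			return client.Core().Services(ns).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
		}},
		{"replicasets", func() error {
			return client.Extensions().ReplicaSets(ns).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
		}},
		// ... secrets, ingresses, daemonsets, deployments, events as above ...
	}
	for _, d := range deleters {
		if err := d.fn(); err != nil {
			return fmt.Errorf("failed to delete %s list from namespace: %v", d.kind, err)
		}
	}
	return nil
}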


@ -21,16 +21,16 @@ import (
"testing"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/unversioned"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@ -39,51 +39,51 @@ import (
)
func TestNamespaceController(t *testing.T) {
cluster1 := NewCluster("cluster1", api_v1.ConditionTrue)
cluster2 := NewCluster("cluster2", api_v1.ConditionTrue)
ns1 := api_v1.Namespace{
ObjectMeta: api_v1.ObjectMeta{
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
ns1 := apiv1.Namespace{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-namespace",
SelfLink: "/api/v1/namespaces/test-namespace",
},
Spec: api_v1.NamespaceSpec{
Finalizers: []api_v1.FinalizerName{api_v1.FinalizerKubernetes},
Spec: apiv1.NamespaceSpec{
Finalizers: []apiv1.FinalizerName{apiv1.FinalizerKubernetes},
},
}
fakeClient := &fake_fedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}})
RegisterFakeList("namespaces", &fakeClient.Fake, &api_v1.NamespaceList{Items: []api_v1.Namespace{}})
fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("namespaces", &fakeClient.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
namespaceWatch := RegisterFakeWatch("namespaces", &fakeClient.Fake)
namespaceCreateChan := RegisterFakeCopyOnCreate("namespaces", &fakeClient.Fake, namespaceWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fake_kubeclientset.Clientset{}
cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("namespaces", &cluster1Client.Fake)
RegisterFakeList("namespaces", &cluster1Client.Fake, &api_v1.NamespaceList{Items: []api_v1.Namespace{}})
RegisterFakeList("namespaces", &cluster1Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("namespaces", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fake_kubeclientset.Clientset{}
cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("namespaces", &cluster2Client.Fake)
RegisterFakeList("namespaces", &cluster2Client.Fake, &api_v1.NamespaceList{Items: []api_v1.Namespace{}})
RegisterFakeList("namespaces", &cluster2Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster2Client.Fake, cluster2Watch)
RegisterFakeList("replicasets", &fakeClient.Fake, &extensionsv1.ReplicaSetList{Items: []extensionsv1.ReplicaSet{
{
ObjectMeta: api_v1.ObjectMeta{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-rs",
Namespace: ns1.Namespace,
}}}})
RegisterFakeList("secrets", &fakeClient.Fake, &api_v1.SecretList{Items: []api_v1.Secret{
RegisterFakeList("secrets", &fakeClient.Fake, &apiv1.SecretList{Items: []apiv1.Secret{
{
ObjectMeta: api_v1.ObjectMeta{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-secret",
Namespace: ns1.Namespace,
}}}})
RegisterFakeList("services", &fakeClient.Fake, &api_v1.ServiceList{Items: []api_v1.Service{
RegisterFakeList("services", &fakeClient.Fake, &apiv1.ServiceList{Items: []apiv1.Service{
{
ObjectMeta: api_v1.ObjectMeta{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-service",
Namespace: ns1.Namespace,
}}}})
@ -93,7 +93,7 @@ func TestNamespaceController(t *testing.T) {
secretDeleteChan := RegisterDeleteCollection(&fakeClient.Fake, "secrets")
namespaceController := NewNamespaceController(fakeClient)
informerClientFactory := func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) {
informerClientFactory := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
@ -155,7 +155,7 @@ func TestNamespaceController(t *testing.T) {
// Delete the namespace with orphan finalizer (let namespaces
// in underlying clusters be as is).
// TODO: Add a test without orphan finalizer.
ns1.ObjectMeta.Finalizers = append(ns1.ObjectMeta.Finalizers, api_v1.FinalizerOrphan)
ns1.ObjectMeta.Finalizers = append(ns1.ObjectMeta.Finalizers, apiv1.FinalizerOrphan)
ns1.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
namespaceWatch.Modify(&ns1)
assert.Equal(t, ns1.Name, GetStringFromChan(nsDeleteChan))
@ -166,7 +166,7 @@ func TestNamespaceController(t *testing.T) {
close(stop)
}
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error)) {
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
testInformer := ToFederatedInformerForTestOnly(informer)
testInformer.SetClientFactory(informerClientFactory)
}
@ -199,8 +199,8 @@ func GetStringFromChan(c chan string) string {
}
}
func GetNamespaceFromChan(c chan runtime.Object) *api_v1.Namespace {
namespace := GetObjectFromChan(c).(*api_v1.Namespace)
func GetNamespaceFromChan(c chan runtime.Object) *apiv1.Namespace {
namespace := GetObjectFromChan(c).(*apiv1.Namespace)
return namespace
}
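Note that, unlike GetIngressFromChan in the ingress test above, this helper uses an unchecked type assertion and will panic if the channel delivers any other type. A defensive variant (an assumption, not in this commit) would mirror the ingress helper:

func GetNamespaceFromChan(c chan runtime.Object) *apiv1.Namespace {
	// Checked assertion: returns nil instead of panicking on an unexpected type.
	namespace, ok := GetObjectFromChan(c).(*apiv1.Namespace)
	if !ok {
		return nil
	}
	return namespace
}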


@ -20,20 +20,20 @@ import (
"fmt"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch"
@ -85,7 +85,7 @@ type SecretController struct {
func NewSecretController(client federationclientset.Interface) *SecretController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-secrets-controller"})
recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-secrets-controller"})
secretcontroller := &SecretController{
federatedApiClient: client,
@ -104,43 +104,43 @@ func NewSecretController(client federationclientset.Interface) *SecretController
// Start informer in federated API servers on secrets that should be federated.
secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
return client.Core().Secrets(api_v1.NamespaceAll).List(options)
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Core().Secrets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
return client.Core().Secrets(api_v1.NamespaceAll).Watch(options)
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().Secrets(apiv1.NamespaceAll).Watch(options)
},
},
&api_v1.Secret{},
&apiv1.Secret{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))
util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))
// Federated informer on secrets in members of federation.
secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
return targetClient.Core().Secrets(api_v1.NamespaceAll).List(options)
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Core().Secrets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
return targetClient.Core().Secrets(api_v1.NamespaceAll).Watch(options)
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().Secrets(apiv1.NamespaceAll).Watch(options)
},
},
&api_v1.Secret{},
&apiv1.Secret{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some secret operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) {
func(obj pkgruntime.Object) {
secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) {
ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the secrets again.
secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
},
@ -149,19 +149,19 @@ func NewSecretController(client federationclientset.Interface) *SecretController
// Federated updater along with Create/Update/Delete operations.
secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
secret := obj.(*api_v1.Secret)
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*apiv1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Create(secret)
return err
},
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
secret := obj.(*api_v1.Secret)
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*apiv1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Update(secret)
return err
},
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
secret := obj.(*api_v1.Secret)
err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &api_v1.DeleteOptions{})
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*apiv1.Secret)
err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &apiv1.DeleteOptions{})
return err
})
@ -170,8 +170,8 @@ func NewSecretController(client federationclientset.Interface) *SecretController
secretcontroller.removeFinalizerFunc,
secretcontroller.addFinalizerFunc,
// objNameFunc
func(obj pkg_runtime.Object) string {
secret := obj.(*api_v1.Secret)
func(obj pkgruntime.Object) string {
secret := obj.(*apiv1.Secret)
return secret.Name
},
secretcontroller.updateTimeout,
@ -184,8 +184,8 @@ func NewSecretController(client federationclientset.Interface) *SecretController
}
// Returns true if the given object has the given finalizer in its ObjectMeta.
func (secretcontroller *SecretController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer string) bool {
secret := obj.(*api_v1.Secret)
func (secretcontroller *SecretController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
secret := obj.(*apiv1.Secret)
for i := range secret.ObjectMeta.Finalizers {
if string(secret.ObjectMeta.Finalizers[i]) == finalizer {
return true
@ -196,8 +196,8 @@ func (secretcontroller *SecretController) hasFinalizerFunc(obj pkg_runtime.Objec
// Removes the finalizer from the given object's ObjectMeta.
// Assumes that the given object is a secret.
func (secretcontroller *SecretController) removeFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) {
secret := obj.(*api_v1.Secret)
func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
secret := obj.(*apiv1.Secret)
newFinalizers := []string{}
hasFinalizer := false
for i := range secret.ObjectMeta.Finalizers {
@ -221,8 +221,8 @@ func (secretcontroller *SecretController) removeFinalizerFunc(obj pkg_runtime.Ob
// Adds the given finalizer to the given object's ObjectMeta.
// Assumes that the given object is a secret.
func (secretcontroller *SecretController) addFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) {
secret := obj.(*api_v1.Secret)
func (secretcontroller *SecretController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
secret := obj.(*apiv1.Secret)
secret.ObjectMeta.Finalizers = append(secret.ObjectMeta.Finalizers, finalizer)
secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret)
if err != nil {
@ -249,7 +249,7 @@ func (secretcontroller *SecretController) Run(stopChan <-chan struct{}) {
}
func (secretcontroller *SecretController) deliverSecretObj(obj interface{}, delay time.Duration, failed bool) {
secret := obj.(*api_v1.Secret)
secret := obj.(*apiv1.Secret)
secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, delay, failed)
}
@ -289,7 +289,7 @@ func (secretcontroller *SecretController) reconcileSecretsOnClusterChange() {
secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
}
for _, obj := range secretcontroller.secretInformerStore.List() {
secret := obj.(*api_v1.Secret)
secret := obj.(*apiv1.Secret)
secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, secretcontroller.smallDelay, false)
}
}
@ -316,7 +316,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
// Create a copy before modifying the obj to prevent a race condition with
// other readers of the obj from the store.
baseSecretObj, err := conversion.NewCloner().DeepCopy(baseSecretObjFromStore)
baseSecret, ok := baseSecretObj.(*api_v1.Secret)
baseSecret, ok := baseSecretObj.(*apiv1.Secret)
if err != nil || !ok {
glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
secretcontroller.deliverSecret(secret, 0, true)
@ -342,7 +342,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
secretcontroller.deliverSecret(secret, 0, false)
return
}
baseSecret = updatedSecretObj.(*api_v1.Secret)
baseSecret = updatedSecretObj.(*apiv1.Secret)
glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name)
@ -363,7 +363,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
}
// The data should not be modified.
desiredSecret := &api_v1.Secret{
desiredSecret := &apiv1.Secret{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta),
Data: baseSecret.Data,
Type: baseSecret.Type,
@ -379,7 +379,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
ClusterName: cluster.Name,
})
} else {
clusterSecret := clusterSecretObj.(*api_v1.Secret)
clusterSecret := clusterSecretObj.(*apiv1.Secret)
// Update existing secret, if needed.
if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {
@ -416,7 +416,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
}
// delete deletes the given secret or returns error if the deletion was not complete.
func (secretcontroller *SecretController) delete(secret *api_v1.Secret) error {
func (secretcontroller *SecretController) delete(secret *apiv1.Secret) error {
glog.V(3).Infof("Handling deletion of secret: %v", *secret)
_, err := secretcontroller.deletionHelper.HandleObjectInUnderlyingClusters(secret)
if err != nil {


@ -22,14 +22,14 @@ import (
"testing"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait"
@ -38,29 +38,29 @@ import (
)
func TestSecretController(t *testing.T) {
cluster1 := NewCluster("cluster1", api_v1.ConditionTrue)
cluster2 := NewCluster("cluster2", api_v1.ConditionTrue)
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fake_fedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}})
RegisterFakeList("secrets", &fakeClient.Fake, &api_v1.SecretList{Items: []api_v1.Secret{}})
fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("secrets", &fakeClient.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
secretWatch := RegisterFakeWatch("secrets", &fakeClient.Fake)
secretUpdateChan := RegisterFakeCopyOnUpdate("secrets", &fakeClient.Fake, secretWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fake_kubeclientset.Clientset{}
cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("secrets", &cluster1Client.Fake)
RegisterFakeList("secrets", &cluster1Client.Fake, &api_v1.SecretList{Items: []api_v1.Secret{}})
RegisterFakeList("secrets", &cluster1Client.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("secrets", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fake_kubeclientset.Clientset{}
cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("secrets", &cluster2Client.Fake)
RegisterFakeList("secrets", &cluster2Client.Fake, &api_v1.SecretList{Items: []api_v1.Secret{}})
RegisterFakeList("secrets", &cluster2Client.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster2Client.Fake, cluster2Watch)
secretController := NewSecretController(fakeClient)
informerClientFactory := func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) {
informerClientFactory := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name {
case cluster1.Name:
return cluster1Client, nil
@ -80,8 +80,8 @@ func TestSecretController(t *testing.T) {
stop := make(chan struct{})
secretController.Run(stop)
secret1 := api_v1.Secret{
ObjectMeta: api_v1.ObjectMeta{
secret1 := apiv1.Secret{
ObjectMeta: apiv1.ObjectMeta{
Name: "test-secret",
Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/secrets/test-secret",
@ -90,7 +90,7 @@ func TestSecretController(t *testing.T) {
"A": []byte("ala ma kota"),
"B": []byte("quick brown fox"),
},
Type: api_v1.SecretTypeOpaque,
Type: apiv1.SecretTypeOpaque,
}
// Test add federated secret.
@ -99,7 +99,7 @@ func TestSecretController(t *testing.T) {
updatedSecret := GetSecretFromChan(secretUpdateChan)
assert.True(t, secretController.hasFinalizerFunc(updatedSecret, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedSecret = GetSecretFromChan(secretUpdateChan)
assert.True(t, secretController.hasFinalizerFunc(updatedSecret, api_v1.FinalizerOrphan))
assert.True(t, secretController.hasFinalizerFunc(updatedSecret, apiv1.FinalizerOrphan))
secret1 = *updatedSecret
// Verify that the secret is created in underlying cluster1.
@ -161,12 +161,12 @@ func TestSecretController(t *testing.T) {
close(stop)
}
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error)) {
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
testInformer := ToFederatedInformerForTestOnly(informer)
testInformer.SetClientFactory(informerClientFactory)
}
func secretsEqual(a, b api_v1.Secret) bool {
func secretsEqual(a, b apiv1.Secret) bool {
// Clear the SelfLink and ObjectMeta.Finalizers since they will be different
// in the resource in the federation control plane and the resource in the underlying cluster.
a.SelfLink = ""
@ -176,20 +176,20 @@ func secretsEqual(a, b api_v1.Secret) bool {
return reflect.DeepEqual(a, b)
}
func GetSecretFromChan(c chan runtime.Object) *api_v1.Secret {
secret := GetObjectFromChan(c).(*api_v1.Secret)
func GetSecretFromChan(c chan runtime.Object) *apiv1.Secret {
secret := GetObjectFromChan(c).(*apiv1.Secret)
return secret
}
// Wait till the store is updated with latest secret.
func WaitForSecretStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, desiredSecret *api_v1.Secret, timeout time.Duration) error {
func WaitForSecretStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, desiredSecret *apiv1.Secret, timeout time.Duration) error {
retryInterval := 100 * time.Millisecond
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
obj, found, err := store.GetByKey(clusterName, key)
if !found || err != nil {
return false, err
}
equal := secretsEqual(*obj.(*api_v1.Secret), *desiredSecret)
equal := secretsEqual(*obj.(*apiv1.Secret), *desiredSecret)
return equal, err
})
return err


@ -24,7 +24,7 @@ import (
cache "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
@ -91,7 +91,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
}
cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Endpoints(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
@ -115,7 +115,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
cachedClusterClient.serviceStore.Indexer, cachedClusterClient.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {


@ -34,7 +34,7 @@ import (
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
@ -164,7 +164,7 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface,
}
s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return s.federationClient.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
@ -187,7 +187,7 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface,
)
s.clusterStore.Store, s.clusterController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return s.federationClient.Federation().Clusters().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {


@ -19,9 +19,9 @@ package eventsink
import (
"testing"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
@ -29,7 +29,7 @@ import (
)
func TestEventSink(t *testing.T) {
fakeFederationClient := &fake_fedclientset.Clientset{}
fakeFederationClient := &fakefedclientset.Clientset{}
createdChan := make(chan runtime.Object, 100)
fakeFederationClient.AddReactor("create", "events", func(action core.Action) (bool, runtime.Object, error) {
createAction := action.(core.CreateAction)
@ -45,8 +45,8 @@ func TestEventSink(t *testing.T) {
return true, obj, nil
})
event := api_v1.Event{
ObjectMeta: api_v1.ObjectMeta{
event := apiv1.Event{
ObjectMeta: apiv1.ObjectMeta{
Name: "bzium",
Namespace: "ns",
},
@ -54,7 +54,7 @@ func TestEventSink(t *testing.T) {
sink := NewFederatedEventSink(fakeFederationClient)
eventUpdated, err := sink.Create(&event)
assert.NoError(t, err)
eventV1 := GetObjectFromChan(createdChan).(*api_v1.Event)
eventV1 := GetObjectFromChan(createdChan).(*apiv1.Event)
assert.NotNil(t, eventV1)
// Just some simple sanity checks.
assert.Equal(t, event.Name, eventV1.Name)
@ -62,7 +62,7 @@ func TestEventSink(t *testing.T) {
eventUpdated, err = sink.Update(&event)
assert.NoError(t, err)
eventV1 = GetObjectFromChan(updateChan).(*api_v1.Event)
eventV1 = GetObjectFromChan(updateChan).(*apiv1.Event)
assert.NotNil(t, eventV1)
// Just some simple sanity checks.
assert.Equal(t, event.Name, eventV1.Name)


@ -22,13 +22,13 @@ import (
"sync"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
@ -68,7 +68,7 @@ type FederatedReadOnlyStore interface {
// issues occur less often. All users of the interface should assume
// that there may be significant delays in content updates of all kinds and write their
// code so that it doesn't break if something is slightly out-of-sync.
ClustersSynced(clusters []*federation_api.Cluster) bool
ClustersSynced(clusters []*federationapi.Cluster) bool
}
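In practice a consumer honors this weak-consistency contract by polling ClustersSynced before trusting reads. A minimal sketch, assuming a store variable of this interface type, a clusters slice of *federationapi.Cluster, and the wait/time imports already present in this file:

// Poll until the store has synced the given clusters, then read from it.
err := wait.PollImmediate(100*time.Millisecond, time.Minute, func() (bool, error) {
	return store.ClustersSynced(clusters), nil
})
if err != nil {
	glog.Errorf("store never synced the given clusters: %v", err)
}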
// An interface to access federation members and clients.
@ -77,13 +77,13 @@ type FederationView interface {
GetClientsetForCluster(clusterName string) (kubeclientset.Interface, error)
// GetUnreadyClusters returns a list of all clusters that are not ready yet.
GetUnreadyClusters() ([]*federation_api.Cluster, error)
GetUnreadyClusters() ([]*federationapi.Cluster, error)
// GetReadyClusters returns all clusters for which the sub-informers are run.
GetReadyClusters() ([]*federation_api.Cluster, error)
GetReadyClusters() ([]*federationapi.Cluster, error)
// GetReadyCluster returns the cluster with the given name, if found.
GetReadyCluster(name string) (*federation_api.Cluster, bool, error)
GetReadyCluster(name string) (*federationapi.Cluster, bool, error)
// ClustersSynced returns true if the view is synced (for the first time).
ClustersSynced() bool
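A hypothetical consumer of this view (the view variable is assumed) iterates the ready clusters and fetches a clientset for each:

clusters, err := view.GetReadyClusters()
if err != nil {
	glog.Errorf("failed to list ready clusters: %v", err)
	return
}
for _, cluster := range clusters {
	client, err := view.GetClientsetForCluster(cluster.Name)
	if err != nil {
		glog.Errorf("no clientset for cluster %s: %v", cluster.Name, err)
		continue
	}
	_ = client // use the clientset against the member cluster
}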
@ -111,12 +111,12 @@ type FederatedInformer interface {
type FederatedInformerForTestOnly interface {
FederatedInformer
SetClientFactory(func(*federation_api.Cluster) (kubeclientset.Interface, error))
SetClientFactory(func(*federationapi.Cluster) (kubeclientset.Interface, error))
}
// A function that should be used to create an informer on the target object. Store should use
// cache.DeletionHandlingMetaNamespaceKeyFunc as a keying function.
type TargetInformerFactory func(*federation_api.Cluster, kubeclientset.Interface) (cache.Store, cache.ControllerInterface)
type TargetInformerFactory func(*federationapi.Cluster, kubeclientset.Interface) (cache.Store, cache.ControllerInterface)
// A structure with cluster lifecycle handler functions. Cluster is available (and ClusterAvailable is fired)
// when it is created in federated etcd and ready. Cluster becomes unavailable (and ClusterUnavailable is fired)
@ -124,10 +124,10 @@ type TargetInformerFactory func(*federation_api.Cluster, kubeclientset.Interface
// and ClusterUnavailable are fired.
type ClusterLifecycleHandlerFuncs struct {
// Fired when the cluster becomes available.
ClusterAvailable func(*federation_api.Cluster)
ClusterAvailable func(*federationapi.Cluster)
// Fired when the cluster becomes unavailable. The second arg contains data that was present
// in the cluster before deletion.
ClusterUnavailable func(*federation_api.Cluster, []interface{})
ClusterUnavailable func(*federationapi.Cluster, []interface{})
}
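For illustration, a caller supplies the lifecycle hooks like this (logging-only handlers, assumed for the example; the namespace controller above wires in real ones):

lifecycle := &util.ClusterLifecycleHandlerFuncs{
	ClusterAvailable: func(cluster *federationapi.Cluster) {
		glog.V(2).Infof("cluster %s became available", cluster.Name)
	},
	ClusterUnavailable: func(cluster *federationapi.Cluster, data []interface{}) {
		// data holds the objects that were cached for the cluster before it went away.
		glog.V(2).Infof("cluster %s became unavailable; dropped %d cached objects", cluster.Name, len(data))
	},
}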
// Builds a FederatedInformer for the given federation client and factory.
@ -138,7 +138,7 @@ func NewFederatedInformer(
federatedInformer := &federatedInformerImpl{
targetInformerFactory: targetInformerFactory,
clientFactory: func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) {
clientFactory: func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
clusterConfig, err := BuildClusterConfig(cluster)
if err == nil && clusterConfig != nil {
clientset := kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, userAgentName))
@ -160,18 +160,18 @@ func NewFederatedInformer(
federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return federationClient.Federation().Clusters().List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return federationClient.Federation().Clusters().Watch(options)
},
},
&federation_api.Cluster{},
&federationapi.Cluster{},
clusterSyncPeriod,
cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldCluster, ok := old.(*federation_api.Cluster)
oldCluster, ok := old.(*federationapi.Cluster)
if ok {
var data []interface{}
if clusterLifecycle.ClusterUnavailable != nil {
@ -184,7 +184,7 @@ func NewFederatedInformer(
}
},
AddFunc: func(cur interface{}) {
curCluster, ok := cur.(*federation_api.Cluster)
curCluster, ok := cur.(*federationapi.Cluster)
if ok && isClusterReady(curCluster) {
federatedInformer.addCluster(curCluster)
if clusterLifecycle.ClusterAvailable != nil {
@ -195,12 +195,12 @@ func NewFederatedInformer(
}
},
UpdateFunc: func(old, cur interface{}) {
oldCluster, ok := old.(*federation_api.Cluster)
oldCluster, ok := old.(*federationapi.Cluster)
if !ok {
glog.Errorf("Internal error: Cluster %v not updated. Old cluster not of correct type.", old)
return
}
curCluster, ok := cur.(*federation_api.Cluster)
curCluster, ok := cur.(*federationapi.Cluster)
if !ok {
glog.Errorf("Internal error: Cluster %v not updated. New cluster not of correct type.", cur)
return
@ -230,10 +230,10 @@ func NewFederatedInformer(
return federatedInformer
}
func isClusterReady(cluster *federation_api.Cluster) bool {
func isClusterReady(cluster *federationapi.Cluster) bool {
for _, condition := range cluster.Status.Conditions {
if condition.Type == federation_api.ClusterReady {
if condition.Status == api_v1.ConditionTrue {
if condition.Type == federationapi.ClusterReady {
if condition.Status == apiv1.ConditionTrue {
return true
}
}
@ -260,7 +260,7 @@ type federatedInformerImpl struct {
targetInformers map[string]informer
// A function to build clients.
clientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error)
clientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)
}
// *federatedInformerImpl implements FederatedInformer interface.
@ -291,7 +291,7 @@ func (f *federatedInformerImpl) Start() {
go f.clusterInformer.controller.Run(f.clusterInformer.stopChan)
}
func (f *federatedInformerImpl) SetClientFactory(clientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error)) {
func (f *federatedInformerImpl) SetClientFactory(clientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
f.Lock()
defer f.Unlock()
@ -319,14 +319,14 @@ func (f *federatedInformerImpl) getClientsetForClusterUnlocked(clusterName strin
return nil, fmt.Errorf("cluster %q not found", clusterName)
}
func (f *federatedInformerImpl) GetUnreadyClusters() ([]*federation_api.Cluster, error) {
func (f *federatedInformerImpl) GetUnreadyClusters() ([]*federationapi.Cluster, error) {
f.Lock()
defer f.Unlock()
items := f.clusterInformer.store.List()
result := make([]*federation_api.Cluster, 0, len(items))
result := make([]*federationapi.Cluster, 0, len(items))
for _, item := range items {
if cluster, ok := item.(*federation_api.Cluster); ok {
if cluster, ok := item.(*federationapi.Cluster); ok {
if !isClusterReady(cluster) {
result = append(result, cluster)
}
@ -338,14 +338,14 @@ func (f *federatedInformerImpl) GetUnreadyClusters() ([]*federation_api.Cluster,
}
// GetReadyClusters returns all clusters for which the sub-informers are run.
func (f *federatedInformerImpl) GetReadyClusters() ([]*federation_api.Cluster, error) {
func (f *federatedInformerImpl) GetReadyClusters() ([]*federationapi.Cluster, error) {
f.Lock()
defer f.Unlock()
items := f.clusterInformer.store.List()
result := make([]*federation_api.Cluster, 0, len(items))
result := make([]*federationapi.Cluster, 0, len(items))
for _, item := range items {
if cluster, ok := item.(*federation_api.Cluster); ok {
if cluster, ok := item.(*federationapi.Cluster); ok {
if isClusterReady(cluster) {
result = append(result, cluster)
}
@ -357,15 +357,15 @@ func (f *federatedInformerImpl) GetReadyClusters() ([]*federation_api.Cluster, e
}
// GetReadyCluster returns the cluster with the given name, if found and ready.
func (f *federatedInformerImpl) GetReadyCluster(name string) (*federation_api.Cluster, bool, error) {
func (f *federatedInformerImpl) GetReadyCluster(name string) (*federationapi.Cluster, bool, error) {
f.Lock()
defer f.Unlock()
return f.getReadyClusterUnlocked(name)
}
func (f *federatedInformerImpl) getReadyClusterUnlocked(name string) (*federation_api.Cluster, bool, error) {
func (f *federatedInformerImpl) getReadyClusterUnlocked(name string) (*federationapi.Cluster, bool, error) {
if obj, exist, err := f.clusterInformer.store.GetByKey(name); exist && err == nil {
if cluster, ok := obj.(*federation_api.Cluster); ok {
if cluster, ok := obj.(*federationapi.Cluster); ok {
if isClusterReady(cluster) {
return cluster, true, nil
}
@ -385,7 +385,7 @@ func (f *federatedInformerImpl) ClustersSynced() bool {
}
// Adds the given cluster to the federated informer.
func (f *federatedInformerImpl) addCluster(cluster *federation_api.Cluster) {
func (f *federatedInformerImpl) addCluster(cluster *federationapi.Cluster) {
f.Lock()
defer f.Unlock()
name := cluster.Name
@ -405,7 +405,7 @@ func (f *federatedInformerImpl) addCluster(cluster *federation_api.Cluster) {
}
// Removes the cluster from federated informer.
func (f *federatedInformerImpl) deleteCluster(cluster *federation_api.Cluster) {
func (f *federatedInformerImpl) deleteCluster(cluster *federationapi.Cluster) {
f.Lock()
defer f.Unlock()
name := cluster.Name
@ -486,7 +486,7 @@ func (fs *federatedStoreImpl) GetKeyFor(item interface{}) string {
// Checks whether stores for all clusters from the list (and only these) are present and
// are synced.
func (fs *federatedStoreImpl) ClustersSynced(clusters []*federation_api.Cluster) bool {
func (fs *federatedStoreImpl) ClustersSynced(clusters []*federationapi.Cluster) bool {
// Get the list of informers to check under a lock and check it outside.
okSoFar, informersToCheck := func() (bool, []informer) {

View File

@ -20,12 +20,12 @@ import (
"testing"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefederationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
@ -39,18 +39,18 @@ func TestFederatedInformer(t *testing.T) {
fakeFederationClient := &fakefederationclientset.Clientset{}
// Add a single cluster to federation and remove it when needed.
cluster := federation_api.Cluster{
ObjectMeta: api_v1.ObjectMeta{
cluster := federationapi.Cluster{
ObjectMeta: apiv1.ObjectMeta{
Name: "mycluster",
},
Status: federation_api.ClusterStatus{
Conditions: []federation_api.ClusterCondition{
{Type: federation_api.ClusterReady, Status: api_v1.ConditionTrue},
Status: federationapi.ClusterStatus{
Conditions: []federationapi.ClusterCondition{
{Type: federationapi.ClusterReady, Status: apiv1.ConditionTrue},
},
},
}
fakeFederationClient.AddReactor("list", "clusters", func(action core.Action) (bool, runtime.Object, error) {
return true, &federation_api.ClusterList{Items: []federation_api.Cluster{cluster}}, nil
return true, &federationapi.ClusterList{Items: []federationapi.Cluster{cluster}}, nil
})
deleteChan := make(chan struct{})
fakeFederationClient.AddWatchReactor("clusters", func(action core.Action) (bool, watch.Interface, error) {
@ -62,32 +62,32 @@ func TestFederatedInformer(t *testing.T) {
return true, fakeWatch, nil
})
fakeKubeClient := &fake_kubeclientset.Clientset{}
fakeKubeClient := &fakekubeclientset.Clientset{}
// There is a single service ns1/s1 in cluster mycluster.
service := api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
service := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
}
fakeKubeClient.AddReactor("list", "services", func(action core.Action) (bool, runtime.Object, error) {
return true, &api_v1.ServiceList{Items: []api_v1.Service{service}}, nil
return true, &apiv1.ServiceList{Items: []apiv1.Service{service}}, nil
})
fakeKubeClient.AddWatchReactor("services", func(action core.Action) (bool, watch.Interface, error) {
return true, watch.NewFake(), nil
})
targetInformerFactory := func(cluster *federation_api.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
targetInformerFactory := func(cluster *federationapi.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (runtime.Object, error) {
return clientset.Core().Services(api_v1.NamespaceAll).List(options)
ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
return clientset.Core().Services(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
return clientset.Core().Services(api_v1.NamespaceAll).Watch(options)
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return clientset.Core().Services(apiv1.NamespaceAll).Watch(options)
},
},
&api_v1.Service{},
&apiv1.Service{},
10*time.Second,
cache.ResourceEventHandlerFuncs{})
}
@ -95,25 +95,25 @@ func TestFederatedInformer(t *testing.T) {
addedClusters := make(chan string, 1)
deletedClusters := make(chan string, 1)
lifecycle := ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) {
ClusterAvailable: func(cluster *federationapi.Cluster) {
addedClusters <- cluster.Name
close(addedClusters)
},
ClusterUnavailable: func(cluster *federation_api.Cluster, _ []interface{}) {
ClusterUnavailable: func(cluster *federationapi.Cluster, _ []interface{}) {
deletedClusters <- cluster.Name
close(deletedClusters)
},
}
informer := NewFederatedInformer(fakeFederationClient, targetInformerFactory, &lifecycle).(*federatedInformerImpl)
informer.clientFactory = func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) {
informer.clientFactory = func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
return fakeKubeClient, nil
}
assert.NotNil(t, informer)
informer.Start()
// Wait until mycluster is synced.
for !informer.GetTargetStore().ClustersSynced([]*federation_api.Cluster{&cluster}) {
for !informer.GetTargetStore().ClustersSynced([]*federationapi.Cluster{&cluster}) {
time.Sleep(time.Millisecond * 100)
}
readyClusters, err := informer.GetReadyClusters()
@ -131,7 +131,7 @@ func TestFederatedInformer(t *testing.T) {
// All checked, let's delete the cluster.
deleteChan <- struct{}{}
for !informer.GetTargetStore().ClustersSynced([]*federation_api.Cluster{}) {
for !informer.GetTargetStore().ClustersSynced([]*federationapi.Cluster{}) {
time.Sleep(time.Millisecond * 100)
}
readyClusters, err = informer.GetReadyClusters()

View File

@ -21,7 +21,7 @@ import (
"time"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
)
// Type of the operation that can be executed in the federation.
@ -37,7 +37,7 @@ const (
type FederatedOperation struct {
Type FederatedOperationType
ClusterName string
Obj pkg_runtime.Object
Obj pkgruntime.Object
}
// A helper that executes the given set of updates on federation, in parallel.
@ -52,7 +52,7 @@ type FederatedUpdater interface {
}
// A function that executes some operation using the passed client and object.
type FederatedOperationHandler func(kubeclientset.Interface, pkg_runtime.Object) error
type FederatedOperationHandler func(kubeclientset.Interface, pkgruntime.Object) error
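As a hedged illustration (not from this commit), a FederatedOperationHandler usually type-asserts the object and calls the matching client method. The sketch below assumes the apiv1 alias for k8s.io/kubernetes/pkg/api/v1 and an fmt import, mirroring the Service fixtures in the tests that follow:

func exampleUpdateHandler(client kubeclientset.Interface, obj pkgruntime.Object) error {
	service, ok := obj.(*apiv1.Service)
	if !ok {
		return fmt.Errorf("expected *apiv1.Service, got %T", obj)
	}
	// Apply the update in the target cluster through its clientset.
	_, err := client.Core().Services(service.Namespace).Update(service)
	return err
}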
type federatedUpdaterImpl struct {
federation FederationView

View File

@ -21,11 +21,11 @@ import (
"testing"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"github.com/stretchr/testify/assert"
)
@ -38,18 +38,18 @@ type fakeFederationView struct {
var _ FederationView = &fakeFederationView{}
func (f *fakeFederationView) GetClientsetForCluster(clusterName string) (kubeclientset.Interface, error) {
return &fake_kubeclientset.Clientset{}, nil
return &fakekubeclientset.Clientset{}, nil
}
func (f *fakeFederationView) GetReadyClusters() ([]*federation_api.Cluster, error) {
return []*federation_api.Cluster{}, nil
func (f *fakeFederationView) GetReadyClusters() ([]*federationapi.Cluster, error) {
return []*federationapi.Cluster{}, nil
}
func (f *fakeFederationView) GetUnreadyClusters() ([]*federation_api.Cluster, error) {
return []*federation_api.Cluster{}, nil
func (f *fakeFederationView) GetUnreadyClusters() ([]*federationapi.Cluster, error) {
return []*federationapi.Cluster{}, nil
}
func (f *fakeFederationView) GetReadyCluster(name string) (*federation_api.Cluster, bool, error) {
func (f *fakeFederationView) GetReadyCluster(name string) (*federationapi.Cluster, bool, error) {
return nil, false, nil
}
@ -62,13 +62,13 @@ func TestFederatedUpdaterOK(t *testing.T) {
updateChan := make(chan string, 5)
updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error {
service := obj.(*api_v1.Service)
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
service := obj.(*apiv1.Service)
addChan <- service.Name
return nil
},
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error {
service := obj.(*api_v1.Service)
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
service := obj.(*apiv1.Service)
updateChan <- service.Name
return nil
},
@ -93,7 +93,7 @@ func TestFederatedUpdaterOK(t *testing.T) {
func TestFederatedUpdaterError(t *testing.T) {
updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error {
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
return fmt.Errorf("boom")
}, noop, noop)
@ -113,7 +113,7 @@ func TestFederatedUpdaterError(t *testing.T) {
func TestFederatedUpdaterTimeout(t *testing.T) {
start := time.Now()
updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error {
func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
time.Sleep(time.Minute)
return nil
},
@ -134,15 +134,15 @@ func TestFederatedUpdaterTimeout(t *testing.T) {
assert.True(t, start.Add(10*time.Second).After(end))
}
func makeService(cluster, name string) *api_v1.Service {
return &api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
func makeService(cluster, name string) *apiv1.Service {
return &apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: name,
},
}
}
func noop(_ kubeclientset.Interface, _ pkg_runtime.Object) error {
func noop(_ kubeclientset.Interface, _ pkgruntime.Object) error {
return nil
}

View File

@ -20,25 +20,25 @@ import (
"fmt"
"reflect"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
)
// Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on all object changes.
func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs {
func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldObj := old.(pkg_runtime.Object)
oldObj := old.(pkgruntime.Object)
triggerFunc(oldObj)
},
AddFunc: func(cur interface{}) {
curObj := cur.(pkg_runtime.Object)
curObj := cur.(pkgruntime.Object)
triggerFunc(curObj)
},
UpdateFunc: func(old, cur interface{}) {
curObj := cur.(pkg_runtime.Object)
curObj := cur.(pkgruntime.Object)
if !reflect.DeepEqual(old, cur) {
triggerFunc(curObj)
}
@ -48,7 +48,7 @@ func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *cache.Resourc
// Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on object add and delete as well as spec/object meta on update.
func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs {
func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
getFieldOrPanic := func(obj interface{}, fieldName string) interface{} {
val := reflect.ValueOf(obj).Elem().FieldByName(fieldName)
if val.IsValid() {
@ -59,17 +59,17 @@ func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *cache
}
return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) {
oldObj := old.(pkg_runtime.Object)
oldObj := old.(pkgruntime.Object)
triggerFunc(oldObj)
},
AddFunc: func(cur interface{}) {
curObj := cur.(pkg_runtime.Object)
curObj := cur.(pkgruntime.Object)
triggerFunc(curObj)
},
UpdateFunc: func(old, cur interface{}) {
curObj := cur.(pkg_runtime.Object)
oldMeta := getFieldOrPanic(old, "ObjectMeta").(api_v1.ObjectMeta)
curMeta := getFieldOrPanic(cur, "ObjectMeta").(api_v1.ObjectMeta)
curObj := cur.(pkgruntime.Object)
oldMeta := getFieldOrPanic(old, "ObjectMeta").(apiv1.ObjectMeta)
curMeta := getFieldOrPanic(cur, "ObjectMeta").(apiv1.ObjectMeta)
if !ObjectMetaEquivalent(oldMeta, curMeta) ||
!reflect.DeepEqual(getFieldOrPanic(old, "Spec"), getFieldOrPanic(cur, "Spec")) {
triggerFunc(curObj)

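Wiring these helpers up is uniform across the federation controllers; a minimal sketch using only the imports shown above:

trigger := NewTriggerOnAllChanges(func(obj pkgruntime.Object) {
	// A real controller would typically enqueue obj for reconciliation here.
	fmt.Printf("object changed: %v\n", obj)
})
// ResourceEventHandlerFuncs satisfies cache.ResourceEventHandler, so trigger
// can be passed directly to cache.NewInformer, as in the informer setups
// earlier in this diff.
_ = trigger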
View File

@ -19,22 +19,22 @@ package util
import (
"testing"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"github.com/stretchr/testify/assert"
)
func TestHandlers(t *testing.T) {
// There is a single service ns1/s1.
service := api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
service := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
}
service2 := api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
service2 := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
Annotations: map[string]string{
@ -53,7 +53,7 @@ func TestHandlers(t *testing.T) {
}
trigger := NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) {
func(obj pkgruntime.Object) {
triggerChan <- struct{}{}
})
@ -67,7 +67,7 @@ func TestHandlers(t *testing.T) {
assert.True(t, triggered())
trigger2 := NewTriggerOnMetaAndSpecChanges(
func(obj pkg_runtime.Object) {
func(obj pkgruntime.Object) {
triggerChan <- struct{}{}
},
)
@ -81,14 +81,14 @@ func TestHandlers(t *testing.T) {
trigger2.OnUpdate(&service, &service2)
assert.True(t, triggered())
service3 := api_v1.Service{
ObjectMeta: api_v1.ObjectMeta{
service3 := apiv1.Service{
ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1",
Name: "s1",
},
Status: api_v1.ServiceStatus{
LoadBalancer: api_v1.LoadBalancerStatus{
Ingress: []api_v1.LoadBalancerIngress{{
Status: apiv1.ServiceStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: []apiv1.LoadBalancerIngress{{
Hostname: "A",
}},
},

View File

@ -20,19 +20,19 @@ import (
"hash/fnv"
"sort"
fed_api "k8s.io/kubernetes/federation/apis/federation"
fedapi "k8s.io/kubernetes/federation/apis/federation"
)
// Planner decides how many out of the given replicas should be placed in each of the
// federated clusters.
type Planner struct {
preferences *fed_api.FederatedReplicaSetPreferences
preferences *fedapi.FederatedReplicaSetPreferences
}
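To make the contract concrete, a short sketch of driving the planner. The fedapi alias matches this commit; treating the final Plan argument as a name hashed for tie-breaking is an assumption based on the tests below:

prefs := &fedapi.FederatedReplicaSetPreferences{
	Clusters: map[string]fedapi.ClusterReplicaSetPreferences{
		"*": {Weight: 1},
	},
}
plan, overflow := NewPlanner(prefs).Plan(
	50,                      // replicas to distribute
	[]string{"A", "B", "C"}, // available clusters
	map[string]int64{},      // current replica counts
	map[string]int64{},      // per-cluster capacity limits
	"my-replica-set",        // hashed for tie-breaking (assumed semantics)
)
// With equal weights the replicas split near-evenly, e.g.
// {"A": 16, "B": 17, "C": 17}; the exact split is hash dependent, as the tests note.
_, _ = plan, overflow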
type namedClusterReplicaSetPreferences struct {
clusterName string
hash uint32
fed_api.ClusterReplicaSetPreferences
fedapi.ClusterReplicaSetPreferences
}
type byWeight []*namedClusterReplicaSetPreferences
@ -46,7 +46,7 @@ func (a byWeight) Less(i, j int) bool {
return (a[i].Weight > a[j].Weight) || (a[i].Weight == a[j].Weight && a[i].hash < a[j].hash)
}
func NewPlanner(preferences *fed_api.FederatedReplicaSetPreferences) *Planner {
func NewPlanner(preferences *fedapi.FederatedReplicaSetPreferences) *Planner {
return &Planner{
preferences: preferences,
}
@ -71,7 +71,7 @@ func (p *Planner) Plan(replicasToDistribute int64, availableClusters []string, c
plan := make(map[string]int64, len(preferences))
overflow := make(map[string]int64, len(preferences))
named := func(name string, pref fed_api.ClusterReplicaSetPreferences) *namedClusterReplicaSetPreferences {
named := func(name string, pref fedapi.ClusterReplicaSetPreferences) *namedClusterReplicaSetPreferences {
// Seems to work better than adler32 for our case.
hasher := fnv.New32()
hasher.Write([]byte(name))

View File

@ -19,13 +19,13 @@ package planner
import (
"testing"
fed_api "k8s.io/kubernetes/federation/apis/federation"
fedapi "k8s.io/kubernetes/federation/apis/federation"
"github.com/stretchr/testify/assert"
)
func doCheck(t *testing.T, pref map[string]fed_api.ClusterReplicaSetPreferences, replicas int64, clusters []string, expected map[string]int64) {
planer := NewPlanner(&fed_api.FederatedReplicaSetPreferences{
func doCheck(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string, expected map[string]int64) {
planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Clusters: pref,
})
plan, overflow := planer.Plan(replicas, clusters, map[string]int64{}, map[string]int64{}, "")
@ -33,9 +33,9 @@ func doCheck(t *testing.T, pref map[string]fed_api.ClusterReplicaSetPreferences,
assert.Equal(t, 0, len(overflow))
}
func doCheckWithExisting(t *testing.T, pref map[string]fed_api.ClusterReplicaSetPreferences, replicas int64, clusters []string,
func doCheckWithExisting(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
existing map[string]int64, expected map[string]int64) {
planer := NewPlanner(&fed_api.FederatedReplicaSetPreferences{
planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Clusters: pref,
})
plan, overflow := planer.Plan(replicas, clusters, existing, map[string]int64{}, "")
@ -43,12 +43,12 @@ func doCheckWithExisting(t *testing.T, pref map[string]fed_api.ClusterReplicaSet
assert.EqualValues(t, expected, plan)
}
func doCheckWithExistingAndCapacity(t *testing.T, rebalance bool, pref map[string]fed_api.ClusterReplicaSetPreferences, replicas int64, clusters []string,
func doCheckWithExistingAndCapacity(t *testing.T, rebalance bool, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
existing map[string]int64,
capacity map[string]int64,
expected map[string]int64,
expectedOverflow map[string]int64) {
planer := NewPlanner(&fed_api.FederatedReplicaSetPreferences{
planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Rebalance: rebalance,
Clusters: pref,
})
@ -62,102 +62,102 @@ func pint(val int64) *int64 {
}
func TestEqual(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
// hash dependent
map[string]int64{"A": 16, "B": 17, "C": 17})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 25, "B": 25})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A", "B"},
// hash dependent
map[string]int64{"A": 0, "B": 1})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A", "B", "C", "D"},
// hash dependent
map[string]int64{"A": 0, "B": 0, "C": 0, "D": 1})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A"},
map[string]int64{"A": 1})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{},
map[string]int64{})
}
func TestEqualWithExisting(t *testing.T) {
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"C": 30},
map[string]int64{"A": 10, "B": 10, "C": 30})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 30},
map[string]int64{"A": 30, "B": 20})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 0, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 1, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 4, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 5, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 6, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
15, []string{"A", "B"},
map[string]int64{"A": 7, "B": 8},
map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
500000, []string{"A", "B"},
map[string]int64{"A": 300000},
map[string]int64{"A": 300000, "B": 200000})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10},
map[string]int64{"A": 25, "B": 25})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 70},
@ -165,13 +165,13 @@ func TestEqualWithExisting(t *testing.T) {
// TODO: Should be 10:40, update algorithm. Issue: #31816
map[string]int64{"A": 0, "B": 50})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
1, []string{"A", "B"},
map[string]int64{"A": 30},
map[string]int64{"A": 1, "B": 0})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 20},
@ -180,7 +180,7 @@ func TestEqualWithExisting(t *testing.T) {
func TestWithExistingAndCapacity(t *testing.T) {
// desired without capacity: map[string]int64{"A": 17, "B": 17, "C": 16}
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{},
@ -189,7 +189,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"C": 7})
// desired B:50 C:0
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"B", "C"},
@ -200,7 +200,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
)
// desired A:20 B:40
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 2}},
60, []string{"A", "B", "C"},
@ -210,7 +210,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"B": 30})
// map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10}
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},
@ -223,7 +223,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
)
// desired A:20 B:20
doCheckWithExistingAndCapacity(t, false, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 1}},
60, []string{"A", "B", "C"},
@ -233,7 +233,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"A": 20, "B": 20})
// desired A:10 B:50 although A:50 B:10 is fully acceptable because rebalance = false
doCheckWithExistingAndCapacity(t, false, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 5}},
60, []string{"A", "B", "C"},
@ -242,7 +242,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"A": 50, "B": 10, "C": 0},
map[string]int64{})
doCheckWithExistingAndCapacity(t, false, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"},
map[string]int64{},
@ -251,7 +251,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{})
// Actually we would like to have extra 20 in B but 15 is also good.
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{
doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 1}},
60, []string{"A", "B"},
map[string]int64{},
@ -261,75 +261,75 @@ func TestWithExistingAndCapacity(t *testing.T) {
}
func TestMin(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 2, Weight: 0}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"},
// hash dependent.
map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0},
"A": {MinReplicas: 100, Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 10, Weight: 1, MaxReplicas: pint(12)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 12, "B": 12, "C": 12})
}
func TestMax(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 0, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 0, "B": 0, "C": 0})
}
func TestWeight(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1},
"B": {Weight: 2}},
60, []string{"A", "B", "C"},
map[string]int64{"A": 20, "B": 40, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000},
"B": {Weight: 1}},
50, []string{"B", "C"},
map[string]int64{"B": 50, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(10)}},
50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 30, "C": 10})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},
@ -337,7 +337,7 @@ func TestWeight(t *testing.T) {
71, []string{"A", "B", "C", "D"},
map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{
doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)},

View File

@ -24,10 +24,10 @@ import (
"sync"
"time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@ -250,7 +250,7 @@ func CheckObjectFromChan(c chan runtime.Object, checkFunction CheckingFunction)
}
// CompareObjectMeta returns an error when the given objects are not equivalent.
func CompareObjectMeta(a, b api_v1.ObjectMeta) error {
func CompareObjectMeta(a, b apiv1.ObjectMeta) error {
if a.Namespace != b.Namespace {
return fmt.Errorf("Different namespace expected:%s observed:%s", a.Namespace, b.Namespace)
}
@ -272,15 +272,15 @@ func ToFederatedInformerForTestOnly(informer util.FederatedInformer) util.Federa
}
// NewCluster builds a new cluster object.
func NewCluster(name string, readyStatus api_v1.ConditionStatus) *federation_api.Cluster {
return &federation_api.Cluster{
ObjectMeta: api_v1.ObjectMeta{
func NewCluster(name string, readyStatus apiv1.ConditionStatus) *federationapi.Cluster {
return &federationapi.Cluster{
ObjectMeta: apiv1.ObjectMeta{
Name: name,
Annotations: map[string]string{},
},
Status: federation_api.ClusterStatus{
Conditions: []federation_api.ClusterCondition{
{Type: federation_api.ClusterReady, Status: readyStatus},
Status: federationapi.ClusterStatus{
Conditions: []federationapi.ClusterCondition{
{Type: federationapi.ClusterReady, Status: readyStatus},
},
},
}

View File

@ -45,7 +45,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider"
aws_credentials "k8s.io/kubernetes/pkg/credentialprovider/aws"
awscredentials "k8s.io/kubernetes/pkg/credentialprovider/aws"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/volume"
@ -172,7 +172,7 @@ const DefaultVolumeType = "gp2"
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
const DefaultMaxEBSVolumes = 39
// Used to call aws_credentials.Init() just once
// Used to call awscredentials.Init() just once
var once sync.Once
// Services is an abstraction over AWS, to allow mocking/other implementations
@ -720,7 +720,7 @@ func getAvailabilityZone(metadata EC2Metadata) (string, error) {
}
func isRegionValid(region string) bool {
for _, r := range aws_credentials.AWSRegions {
for _, r := range awscredentials.AWSRegions {
if r == region {
return true
}
@ -836,7 +836,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
// Register handler for ECR credentials
once.Do(func() {
aws_credentials.Init()
awscredentials.Init()
})
return awsCloud, nil

View File

@ -32,11 +32,11 @@ import (
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
v2_monitors "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
v2_pools "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
v2monitors "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
v2pools "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules"
neutron_ports "github.com/rackspace/gophercloud/openstack/networking/v2/ports"
neutronports "github.com/rackspace/gophercloud/openstack/networking/v2/ports"
"github.com/rackspace/gophercloud/pagination"
"k8s.io/kubernetes/pkg/api/v1"
@ -80,12 +80,12 @@ func networkExtensions(client *gophercloud.ServiceClient) (map[string]bool, erro
return seen, err
}
func getPortByIP(client *gophercloud.ServiceClient, ipAddress string) (neutron_ports.Port, error) {
var targetPort neutron_ports.Port
func getPortByIP(client *gophercloud.ServiceClient, ipAddress string) (neutronports.Port, error) {
var targetPort neutronports.Port
var portFound = false
err := neutron_ports.List(client, neutron_ports.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
portList, err := neutron_ports.ExtractPorts(page)
err := neutronports.List(client, neutronports.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
portList, err := neutronports.ExtractPorts(page)
if err != nil {
return false, err
}
@ -293,10 +293,10 @@ func getListenerForPort(existingListeners []listeners.Listener, port v1.ServiceP
}
// Get pool for a listener. A listener always has exactly one pool.
func getPoolByListenerID(client *gophercloud.ServiceClient, loadbalancerID string, listenerID string) (*v2_pools.Pool, error) {
listenerPools := make([]v2_pools.Pool, 0, 1)
err := v2_pools.List(client, v2_pools.ListOpts{LoadbalancerID: loadbalancerID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2_pools.ExtractPools(page)
func getPoolByListenerID(client *gophercloud.ServiceClient, loadbalancerID string, listenerID string) (*v2pools.Pool, error) {
listenerPools := make([]v2pools.Pool, 0, 1)
err := v2pools.List(client, v2pools.ListOpts{LoadbalancerID: loadbalancerID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2pools.ExtractPools(page)
if err != nil {
return false, err
}
@ -328,10 +328,10 @@ func getPoolByListenerID(client *gophercloud.ServiceClient, loadbalancerID strin
return &listenerPools[0], nil
}
func getMembersByPoolID(client *gophercloud.ServiceClient, id string) ([]v2_pools.Member, error) {
var members []v2_pools.Member
err := v2_pools.ListAssociateMembers(client, id, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2_pools.ExtractMembers(page)
func getMembersByPoolID(client *gophercloud.ServiceClient, id string) ([]v2pools.Member, error) {
var members []v2pools.Member
err := v2pools.ListAssociateMembers(client, id, v2pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2pools.ExtractMembers(page)
if err != nil {
return false, err
}
@ -347,10 +347,10 @@ func getMembersByPoolID(client *gophercloud.ServiceClient, id string) ([]v2_pool
}
// Each pool has exactly one or zero monitors. ListOpts does not seem to filter anything.
func getMonitorByPoolID(client *gophercloud.ServiceClient, id string) (*v2_monitors.Monitor, error) {
var monitorList []v2_monitors.Monitor
err := v2_monitors.List(client, v2_monitors.ListOpts{PoolID: id}).EachPage(func(page pagination.Page) (bool, error) {
monitorsList, err := v2_monitors.ExtractMonitors(page)
func getMonitorByPoolID(client *gophercloud.ServiceClient, id string) (*v2monitors.Monitor, error) {
var monitorList []v2monitors.Monitor
err := v2monitors.List(client, v2monitors.ListOpts{PoolID: id}).EachPage(func(page pagination.Page) (bool, error) {
monitorsList, err := v2monitors.ExtractMonitors(page)
if err != nil {
return false, err
}
@ -385,7 +385,7 @@ func getMonitorByPoolID(client *gophercloud.ServiceClient, id string) (*v2_monit
}
// Check if a member exists for the node
func memberExists(members []v2_pools.Member, addr string, port int) bool {
func memberExists(members []v2pools.Member, addr string, port int) bool {
for _, member := range members {
if member.Address == addr && member.ProtocolPort == port {
return true
@ -407,7 +407,7 @@ func popListener(existingListeners []listeners.Listener, id string) []listeners.
return existingListeners
}
func popMember(members []v2_pools.Member, addr string, port int) []v2_pools.Member {
func popMember(members []v2pools.Member, addr string, port int) []v2pools.Member {
for i, member := range members {
if member.Address == addr && member.ProtocolPort == port {
members[i] = members[len(members)-1]
@ -596,12 +596,12 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
}
affinity := v1.ServiceAffinityNone
var persistence *v2_pools.SessionPersistence
var persistence *v2pools.SessionPersistence
switch affinity {
case v1.ServiceAffinityNone:
persistence = nil
case v1.ServiceAffinityClientIP:
persistence = &v2_pools.SessionPersistence{Type: "SOURCE_IP"}
persistence = &v2pools.SessionPersistence{Type: "SOURCE_IP"}
default:
return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
}
@ -624,9 +624,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
lbmethod := v2_pools.LBMethod(lbaas.opts.LBMethod)
lbmethod := v2pools.LBMethod(lbaas.opts.LBMethod)
if lbmethod == "" {
lbmethod = v2_pools.LBMethodRoundRobin
lbmethod = v2pools.LBMethodRoundRobin
}
oldListeners, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID)
@ -662,9 +662,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
}
if pool == nil {
glog.V(4).Infof("Creating pool for listener %s", listener.ID)
pool, err = v2_pools.Create(lbaas.network, v2_pools.CreateOpts{
pool, err = v2pools.Create(lbaas.network, v2pools.CreateOpts{
Name: fmt.Sprintf("pool_%s_%d", name, portIndex),
Protocol: v2_pools.Protocol(port.Protocol),
Protocol: v2pools.Protocol(port.Protocol),
LBMethod: lbmethod,
ListenerID: listener.ID,
Persistence: persistence,
@ -695,7 +695,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
if !memberExists(members, addr, int(port.NodePort)) {
glog.V(4).Infof("Creating member for pool %s", pool.ID)
_, err := v2_pools.CreateAssociateMember(lbaas.network, pool.ID, v2_pools.MemberCreateOpts{
_, err := v2pools.CreateAssociateMember(lbaas.network, pool.ID, v2pools.MemberCreateOpts{
ProtocolPort: int(port.NodePort),
Address: addr,
SubnetID: lbaas.opts.SubnetId,
@ -716,7 +716,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
// Delete obsolete members for this pool
for _, member := range members {
glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address)
err := v2_pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
}
@ -726,7 +726,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
monitorID := pool.MonitorID
if monitorID == "" && lbaas.opts.CreateMonitor {
glog.V(4).Infof("Creating monitor for pool %s", pool.ID)
monitor, err := v2_monitors.Create(lbaas.network, v2_monitors.CreateOpts{
monitor, err := v2monitors.Create(lbaas.network, v2monitors.CreateOpts{
PoolID: pool.ID,
Type: string(port.Protocol),
Delay: int(lbaas.opts.MonitorDelay.Duration.Seconds()),
@ -756,7 +756,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
monitorID := pool.MonitorID
if monitorID != "" {
glog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID)
err = v2_monitors.Delete(lbaas.network, monitorID).ExtractErr()
err = v2monitors.Delete(lbaas.network, monitorID).ExtractErr()
if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err)
}
@ -770,7 +770,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
if members != nil {
for _, member := range members {
glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address)
err := v2_pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
}
@ -779,7 +779,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
}
glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID)
// delete pool
err = v2_pools.Delete(lbaas.network, pool.ID).ExtractErr()
err = v2pools.Delete(lbaas.network, pool.ID).ExtractErr()
if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err)
}
@ -923,9 +923,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
return nil, err
}
update_opts := neutron_ports.UpdateOpts{SecurityGroups: []string{lbSecGroup.ID}}
update_opts := neutronports.UpdateOpts{SecurityGroups: []string{lbSecGroup.ID}}
res := neutron_ports.Update(lbaas.network, port.ID, update_opts)
res := neutronports.Update(lbaas.network, port.ID, update_opts)
if res.Err != nil {
glog.Errorf("Error occured updating port: %s", port.ID)
@ -986,9 +986,9 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
}
// Get all pools for this loadbalancer, by listener ID.
lbPools := make(map[string]v2_pools.Pool)
err = v2_pools.List(lbaas.network, v2_pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2_pools.ExtractPools(page)
lbPools := make(map[string]v2pools.Pool)
err = v2pools.List(lbaas.network, v2pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2pools.ExtractPools(page)
if err != nil {
return false, err
}
@ -1038,9 +1038,9 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
}
// Find existing pool members (by address) for this port
members := make(map[string]v2_pools.Member)
err := v2_pools.ListAssociateMembers(lbaas.network, pool.ID, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2_pools.ExtractMembers(page)
members := make(map[string]v2pools.Member)
err := v2pools.ListAssociateMembers(lbaas.network, pool.ID, v2pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2pools.ExtractMembers(page)
if err != nil {
return false, err
}
@ -1059,7 +1059,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
// Already exists, do not create member
continue
}
_, err := v2_pools.CreateAssociateMember(lbaas.network, pool.ID, v2_pools.MemberCreateOpts{
_, err := v2pools.CreateAssociateMember(lbaas.network, pool.ID, v2pools.MemberCreateOpts{
Address: addr,
ProtocolPort: int(port.NodePort),
SubnetID: lbaas.opts.SubnetId,
@ -1076,7 +1076,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
// Still present, do not delete member
continue
}
err = v2_pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
err = v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
if err != nil && !isNotFound(err) {
return err
}
@ -1137,8 +1137,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
// get all pools (and health monitors) associated with this loadbalancer
var poolIDs []string
var monitorIDs []string
err = v2_pools.List(lbaas.network, v2_pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2_pools.ExtractPools(page)
err = v2pools.List(lbaas.network, v2pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2pools.ExtractPools(page)
if err != nil {
return false, err
}
@ -1157,8 +1157,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
// get all members associated with each poolIDs
var memberIDs []string
for _, poolID := range poolIDs {
err := v2_pools.ListAssociateMembers(lbaas.network, poolID, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2_pools.ExtractMembers(page)
err := v2pools.ListAssociateMembers(lbaas.network, poolID, v2pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2pools.ExtractMembers(page)
if err != nil {
return false, err
}
@ -1176,7 +1176,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
// delete all monitors
for _, monitorID := range monitorIDs {
err := v2_monitors.Delete(lbaas.network, monitorID).ExtractErr()
err := v2monitors.Delete(lbaas.network, monitorID).ExtractErr()
if err != nil && !isNotFound(err) {
return err
}
@ -1187,7 +1187,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
for _, poolID := range poolIDs {
// delete all members for this pool
for _, memberID := range memberIDs {
err := v2_pools.DeleteMember(lbaas.network, poolID, memberID).ExtractErr()
err := v2pools.DeleteMember(lbaas.network, poolID, memberID).ExtractErr()
if err != nil && !isNotFound(err) {
return err
}
@ -1195,7 +1195,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
}
// delete pool
err := v2_pools.Delete(lbaas.network, poolID).ExtractErr()
err := v2pools.Delete(lbaas.network, poolID).ExtractErr()
if err != nil && !isNotFound(err) {
return err
}

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types"
@ -211,8 +211,8 @@ func (f *fakePetClient) Update(expected, wanted *pcb) error {
pets := []*pcb{}
for i, pet := range f.pets {
if wanted.pod.Name == pet.pod.Name {
f.pets[i].pod.Annotations[api_pod.PodHostnameAnnotation] = wanted.pod.Annotations[api_pod.PodHostnameAnnotation]
f.pets[i].pod.Annotations[api_pod.PodSubdomainAnnotation] = wanted.pod.Annotations[api_pod.PodSubdomainAnnotation]
f.pets[i].pod.Annotations[apipod.PodHostnameAnnotation] = wanted.pod.Annotations[apipod.PodHostnameAnnotation]
f.pets[i].pod.Annotations[apipod.PodSubdomainAnnotation] = wanted.pod.Annotations[apipod.PodSubdomainAnnotation]
f.pets[i].pod.Spec = wanted.pod.Spec
found = true
}

View File

@ -24,7 +24,7 @@ import (
"testing"
"k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
apipod "k8s.io/kubernetes/pkg/api/v1/pod"
)
func TestPetIDName(t *testing.T) {
@ -54,11 +54,11 @@ func TestPetIDDNS(t *testing.T) {
if err != nil {
t.Fatalf("Failed to generate pet %v", err)
}
if hostname, ok := pod.Annotations[api_pod.PodHostnameAnnotation]; !ok || hostname != petName {
if hostname, ok := pod.Annotations[apipod.PodHostnameAnnotation]; !ok || hostname != petName {
t.Errorf("Wrong hostname: %v", hostname)
}
// TODO: Check this against the governing service.
if subdomain, ok := pod.Annotations[api_pod.PodSubdomainAnnotation]; !ok || subdomain != petSubdomain {
if subdomain, ok := pod.Annotations[apipod.PodSubdomainAnnotation]; !ok || subdomain != petSubdomain {
t.Errorf("Wrong subdomain: %v", subdomain)
}
}

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakeinternal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake"
"k8s.io/kubernetes/pkg/controller"
@ -281,7 +281,7 @@ func TestSyncStatefulSetBlockedPet(t *testing.T) {
}
type fakeClient struct {
fake_internal.Clientset
fakeinternal.Clientset
statefulsetClient *fakeStatefulSetClient
}

View File

@ -43,7 +43,7 @@ import (
"k8s.io/kubernetes/pkg/watch"
heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert"
)
@ -298,15 +298,15 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
var heapsterRawMemResponse []byte
if tc.useMetricsApi {
metrics := metrics_api.PodMetricsList{}
metrics := metricsapi.PodMetricsList{}
for i, cpu := range tc.reportedLevels {
podMetric := metrics_api.PodMetrics{
podMetric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace,
},
Timestamp: unversioned.Time{Time: time.Now()},
Containers: []metrics_api.ContainerMetrics{
Containers: []metricsapi.ContainerMetrics{
{
Name: "container",
Usage: v1.ResourceList{

View File

@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/labels"
heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
)
// PodResourceInfo contains pod resourcemetric values as a map from pod names to
@ -92,7 +92,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
metrics := metrics_api.PodMetricsList{}
metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics)
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)

View File

@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/runtime"
heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert"
)
@ -103,18 +103,18 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
if isResource {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := metrics_api.PodMetricsList{}
metrics := metricsapi.PodMetricsList{}
for i, containers := range tc.reportedPodMetrics {
metric := metrics_api.PodMetrics{
metric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace,
},
Timestamp: unversioned.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
Containers: []metrics_api.ContainerMetrics{},
Containers: []metricsapi.ContainerMetrics{},
}
for j, cpu := range containers {
cm := metrics_api.ContainerMetrics{
cm := metricsapi.ContainerMetrics{
Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(

View File

@ -36,7 +36,7 @@ import (
"k8s.io/kubernetes/pkg/runtime"
heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -131,15 +131,15 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
var heapsterRawMemResponse []byte
if tc.resource != nil {
metrics := metrics_api.PodMetricsList{}
metrics := metricsapi.PodMetricsList{}
for i, resValue := range tc.resource.levels {
podMetric := metrics_api.PodMetrics{
podMetric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: testNamespace,
},
Timestamp: unversioned.Time{Time: tc.timestamp},
Containers: []metrics_api.ContainerMetrics{
Containers: []metricsapi.ContainerMetrics{
{
Name: "container1",
Usage: v1.ResourceList{

View File

@ -29,12 +29,12 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/fields"
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@@ -99,7 +99,7 @@ type ServiceController struct {
// (like load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) (*ServiceController, error) {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "service-controller"})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
@@ -121,7 +121,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
}
s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return s.kubeClient.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {


@@ -27,7 +27,7 @@ import (
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/conversion"
@@ -62,7 +62,7 @@ func NewController(p ControllerParameters) *PersistentVolumeController {
eventRecorder := p.EventRecorder
if eventRecorder == nil {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: p.KubeClient.Core().Events("")})
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: p.KubeClient.Core().Events("")})
eventRecorder = broadcaster.NewRecorder(v1.EventSource{Component: "persistentvolume-controller"})
}


@@ -29,7 +29,7 @@ import (
etcd "github.com/coreos/etcd/client"
"github.com/miekg/dns"
skymsg "github.com/skynetservices/skydns/msg"
skyServer "github.com/skynetservices/skydns/server"
skyserver "github.com/skynetservices/skydns/server"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/api/unversioned"
@@ -171,9 +171,9 @@ func assertSRVRecordsMatchPort(t *testing.T, records []dns.RR, port ...int) {
func TestSkySimpleSRVLookup(t *testing.T) {
kd := newKubeDNS()
skydnsConfig := &skyServer.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyServer.SetDefaults(skydnsConfig)
s := skyServer.New(kd, skydnsConfig)
skydnsConfig := &skyserver.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyserver.SetDefaults(skydnsConfig)
s := skyserver.New(kd, skydnsConfig)
service := newHeadlessService()
endpointIPs := []string{"10.0.0.1", "10.0.0.2"}
@@ -201,9 +201,9 @@ func TestSkySimpleSRVLookup(t *testing.T) {
func TestSkyPodHostnameSRVLookup(t *testing.T) {
kd := newKubeDNS()
skydnsConfig := &skyServer.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyServer.SetDefaults(skydnsConfig)
s := skyServer.New(kd, skydnsConfig)
skydnsConfig := &skyserver.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyserver.SetDefaults(skydnsConfig)
s := skyserver.New(kd, skydnsConfig)
service := newHeadlessService()
endpointIPs := []string{"10.0.0.1", "10.0.0.2"}
@@ -240,9 +240,9 @@ func TestSkyPodHostnameSRVLookup(t *testing.T) {
func TestSkyNamedPortSRVLookup(t *testing.T) {
kd := newKubeDNS()
skydnsConfig := &skyServer.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyServer.SetDefaults(skydnsConfig)
s := skyServer.New(kd, skydnsConfig)
skydnsConfig := &skyserver.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyserver.SetDefaults(skydnsConfig)
s := skyserver.New(kd, skydnsConfig)
service := newHeadlessService()
eip := "10.0.0.1"


@@ -25,7 +25,7 @@ import (
"net/url"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/client/restclient/fake"
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
)
@@ -85,14 +85,14 @@ func TestTopPodAllInNamespaceMetrics(t *testing.T) {
metrics := testPodMetricsData()
testNamespace := "testnamespace"
nonTestNamespace := "anothernamespace"
expectedMetrics := metrics_api.PodMetricsList{
expectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta,
Items: metrics.Items[0:2],
}
for _, m := range expectedMetrics.Items {
m.Namespace = testNamespace
}
nonExpectedMetrics := metrics_api.PodMetricsList{
nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta,
Items: metrics.Items[2:],
}
@@ -144,7 +144,7 @@ func TestTopPodWithNameMetrics(t *testing.T) {
initTestErrorHandler(t)
metrics := testPodMetricsData()
expectedMetrics := metrics.Items[0]
nonExpectedMetrics := metrics_api.PodMetricsList{
nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta,
Items: metrics.Items[1:],
}
@@ -192,11 +192,11 @@ func TestTopPodWithNameMetrics(t *testing.T) {
func TestTopPodWithLabelSelectorMetrics(t *testing.T) {
initTestErrorHandler(t)
metrics := testPodMetricsData()
expectedMetrics := metrics_api.PodMetricsList{
expectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta,
Items: metrics.Items[0:2],
}
nonExpectedMetrics := metrics_api.PodMetricsList{
nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta,
Items: metrics.Items[2:],
}
@@ -249,7 +249,7 @@ func TestTopPodWithContainersMetrics(t *testing.T) {
initTestErrorHandler(t)
metrics := testPodMetricsData()
expectedMetrics := metrics.Items[0]
nonExpectedMetrics := metrics_api.PodMetricsList{
nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta,
Items: metrics.Items[1:],
}


@@ -25,7 +25,7 @@ import (
"testing"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
@@ -59,12 +59,12 @@ func marshallBody(metrics interface{}) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(result)), nil
}
func testNodeMetricsData() (*metrics_api.NodeMetricsList, *api.NodeList) {
metrics := &metrics_api.NodeMetricsList{
func testNodeMetricsData() (*metricsapi.NodeMetricsList, *api.NodeList) {
metrics := &metricsapi.NodeMetricsList{
ListMeta: unversioned.ListMeta{
ResourceVersion: "1",
},
Items: []metrics_api.NodeMetrics{
Items: []metricsapi.NodeMetrics{
{
ObjectMeta: v1.ObjectMeta{Name: "node1", ResourceVersion: "10"},
Window: unversioned.Duration{Duration: time.Minute},
@@ -115,16 +115,16 @@ func testNodeMetricsData() (*metrics_api.NodeMetricsList, *api.NodeList) {
return metrics, nodes
}
func testPodMetricsData() *metrics_api.PodMetricsList {
return &metrics_api.PodMetricsList{
func testPodMetricsData() *metricsapi.PodMetricsList {
return &metricsapi.PodMetricsList{
ListMeta: unversioned.ListMeta{
ResourceVersion: "2",
},
Items: []metrics_api.PodMetrics{
Items: []metricsapi.PodMetrics{
{
ObjectMeta: v1.ObjectMeta{Name: "pod1", Namespace: "test", ResourceVersion: "10"},
Window: unversioned.Duration{Duration: time.Minute},
Containers: []metrics_api.ContainerMetrics{
Containers: []metricsapi.ContainerMetrics{
{
Name: "container1-1",
Usage: v1.ResourceList{
@@ -146,7 +146,7 @@ func testPodMetricsData() *metrics_api.PodMetricsList {
{
ObjectMeta: v1.ObjectMeta{Name: "pod2", Namespace: "test", ResourceVersion: "11"},
Window: unversioned.Duration{Duration: time.Minute},
Containers: []metrics_api.ContainerMetrics{
Containers: []metricsapi.ContainerMetrics{
{
Name: "container2-1",
Usage: v1.ResourceList{
@@ -176,7 +176,7 @@ func testPodMetricsData() *metrics_api.PodMetricsList {
{
ObjectMeta: v1.ObjectMeta{Name: "pod3", Namespace: "test", ResourceVersion: "12"},
Window: unversioned.Duration{Duration: time.Minute},
Containers: []metrics_api.ContainerMetrics{
Containers: []metricsapi.ContainerMetrics{
{
Name: "container3-1",
Usage: v1.ResourceList{
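The Usage maps in these fixtures are v1.ResourceList values keyed by resource name. A small sketch of how such quantities are built, assuming the present-day k8s.io/apimachinery module; at the time of this commit the equivalent helpers lived under k8s.io/kubernetes/pkg/api/resource:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // "100m" parses as 100 millicores; "128Mi" as 128 mebibytes.
    cpu := resource.MustParse("100m")
    mem := resource.MustParse("128Mi")
    fmt.Printf("cpu=%s mem=%s (cpu milli-value: %d)\n", cpu.String(), mem.String(), cpu.MilliValue())
}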


@@ -19,7 +19,7 @@ package util
import (
"sync"
fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
@@ -33,7 +33,7 @@ func NewClientCache(loader clientcmd.ClientConfig) *ClientCache {
return &ClientCache{
clientsets: make(map[schema.GroupVersion]*internalclientset.Clientset),
configs: make(map[schema.GroupVersion]*restclient.Config),
fedClientSets: make(map[schema.GroupVersion]fed_clientset.Interface),
fedClientSets: make(map[schema.GroupVersion]fedclientset.Interface),
loader: loader,
}
}
@@ -43,7 +43,7 @@ func NewClientCache(loader clientcmd.ClientConfig) *ClientCache {
type ClientCache struct {
loader clientcmd.ClientConfig
clientsets map[schema.GroupVersion]*internalclientset.Clientset
fedClientSets map[schema.GroupVersion]fed_clientset.Interface
fedClientSets map[schema.GroupVersion]fedclientset.Interface
configs map[schema.GroupVersion]*restclient.Config
matchVersion bool
@@ -158,7 +158,7 @@ func (c *ClientCache) ClientSetForVersion(requiredVersion *schema.GroupVersion)
return clientset, nil
}
func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion) (fed_clientset.Interface, error) {
func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion) (fedclientset.Interface, error) {
if version != nil {
if clientSet, found := c.fedClientSets[*version]; found {
return clientSet, nil
@@ -170,7 +170,7 @@ func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion
}
// TODO: support multi versions of client with clientset
clientSet, err := fed_clientset.NewForConfig(config)
clientSet, err := fedclientset.NewForConfig(config)
if err != nil {
return nil, err
}
@@ -178,7 +178,7 @@ func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion
if version != nil {
configCopy := *config
clientSet, err := fed_clientset.NewForConfig(&configCopy)
clientSet, err := fedclientset.NewForConfig(&configCopy)
if err != nil {
return nil, err
}
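The shape of ClientCache, a map from group/version to a lazily built clientset, is easy to see in isolation. Below is a loose, standard-library-only sketch of that memoization pattern; versionedCache and its fields are invented for illustration, not the real type:

package main

import (
    "fmt"
    "sync"
)

// versionedCache memoizes one expensively-constructed value per version key,
// mirroring the map-per-GroupVersion shape of ClientCache above.
type versionedCache struct {
    mu      sync.Mutex
    entries map[string]string
    build   func(version string) string
}

func (c *versionedCache) Get(version string) string {
    c.mu.Lock()
    defer c.mu.Unlock()
    if v, ok := c.entries[version]; ok {
        return v
    }
    v := c.build(version)
    c.entries[version] = v
    return v
}

func main() {
    c := &versionedCache{
        entries: map[string]string{},
        build:   func(version string) string { return "clientset-for-" + version },
    }
    fmt.Println(c.Get("v1beta1"))
    fmt.Println(c.Get("v1beta1")) // served from the cache on the second call
}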


@@ -29,7 +29,7 @@ import (
"time"
"k8s.io/kubernetes/federation/apis/federation"
fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/events"
@@ -2320,7 +2320,7 @@ func describeConfigMap(configMap *api.ConfigMap) (string, error) {
}
type ClusterDescriber struct {
fed_clientset.Interface
fedclientset.Interface
}
func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {


@@ -26,7 +26,7 @@ import (
"time"
"k8s.io/kubernetes/federation/apis/federation"
fed_fake "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake"
fedfake "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
@@ -665,7 +665,7 @@ func TestDescribeCluster(t *testing.T) {
},
},
}
fake := fed_fake.NewSimpleClientset(&cluster)
fake := fedfake.NewSimpleClientset(&cluster)
d := ClusterDescriber{Interface: fake}
out, err := d.Describe("any", "foo", DescriberSettings{ShowEvents: true})
if err != nil {


@@ -21,7 +21,7 @@ import (
"errors"
"fmt"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/validation"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
@@ -97,63 +97,63 @@ func nodeMetricsUrl(name string) (string, error) {
return fmt.Sprintf("%s/nodes/%s", metricsRoot, name), nil
}
func (cli *HeapsterMetricsClient) GetNodeMetrics(nodeName string, selector labels.Selector) ([]metrics_api.NodeMetrics, error) {
func (cli *HeapsterMetricsClient) GetNodeMetrics(nodeName string, selector labels.Selector) ([]metricsapi.NodeMetrics, error) {
params := map[string]string{"labelSelector": selector.String()}
path, err := nodeMetricsUrl(nodeName)
if err != nil {
return []metrics_api.NodeMetrics{}, err
return []metricsapi.NodeMetrics{}, err
}
resultRaw, err := GetHeapsterMetrics(cli, path, params)
if err != nil {
return []metrics_api.NodeMetrics{}, err
return []metricsapi.NodeMetrics{}, err
}
metrics := make([]metrics_api.NodeMetrics, 0)
metrics := make([]metricsapi.NodeMetrics, 0)
if len(nodeName) == 0 {
metricsList := metrics_api.NodeMetricsList{}
metricsList := metricsapi.NodeMetricsList{}
err = json.Unmarshal(resultRaw, &metricsList)
if err != nil {
return []metrics_api.NodeMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
return []metricsapi.NodeMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
metrics = append(metrics, metricsList.Items...)
} else {
var singleMetric metrics_api.NodeMetrics
var singleMetric metricsapi.NodeMetrics
err = json.Unmarshal(resultRaw, &singleMetric)
if err != nil {
return []metrics_api.NodeMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
return []metricsapi.NodeMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
metrics = append(metrics, singleMetric)
}
return metrics, nil
}
func (cli *HeapsterMetricsClient) GetPodMetrics(namespace string, podName string, allNamespaces bool, selector labels.Selector) ([]metrics_api.PodMetrics, error) {
func (cli *HeapsterMetricsClient) GetPodMetrics(namespace string, podName string, allNamespaces bool, selector labels.Selector) ([]metricsapi.PodMetrics, error) {
if allNamespaces {
namespace = api.NamespaceAll
}
path, err := podMetricsUrl(namespace, podName)
if err != nil {
return []metrics_api.PodMetrics{}, err
return []metricsapi.PodMetrics{}, err
}
params := map[string]string{"labelSelector": selector.String()}
allMetrics := make([]metrics_api.PodMetrics, 0)
allMetrics := make([]metricsapi.PodMetrics, 0)
resultRaw, err := GetHeapsterMetrics(cli, path, params)
if err != nil {
return []metrics_api.PodMetrics{}, err
return []metricsapi.PodMetrics{}, err
}
if len(podName) == 0 {
metrics := metrics_api.PodMetricsList{}
metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics)
if err != nil {
return []metrics_api.PodMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
return []metricsapi.PodMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
allMetrics = append(allMetrics, metrics.Items...)
} else {
var singleMetric metrics_api.PodMetrics
var singleMetric metricsapi.PodMetrics
err = json.Unmarshal(resultRaw, &singleMetric)
if err != nil {
return []metrics_api.PodMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
return []metricsapi.PodMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
}
allMetrics = append(allMetrics, singleMetric)
}


@@ -20,7 +20,7 @@ import (
"fmt"
"io"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/kubectl"
@@ -51,7 +51,7 @@ func NewTopCmdPrinter(out io.Writer) *TopCmdPrinter {
return &TopCmdPrinter{out: out}
}
func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metrics_api.NodeMetrics, availableResources map[string]api.ResourceList) error {
func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, availableResources map[string]api.ResourceList) error {
if len(metrics) == 0 {
return nil
}
@@ -74,7 +74,7 @@ func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metrics_api.NodeMetrics
return nil
}
func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metrics_api.PodMetrics, printContainers bool, withNamespace bool) error {
func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, printContainers bool, withNamespace bool) error {
if len(metrics) == 0 {
return nil
}
@@ -104,7 +104,7 @@ func printColumnNames(out io.Writer, names []string) {
fmt.Fprint(out, "\n")
}
func printSinglePodMetrics(out io.Writer, m *metrics_api.PodMetrics, printContainersOnly bool, withNamespace bool) error {
func printSinglePodMetrics(out io.Writer, m *metricsapi.PodMetrics, printContainersOnly bool, withNamespace bool) error {
containers := make(map[string]api.ResourceList)
podMetrics := make(api.ResourceList)
for _, res := range MeasuredResources {
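PrintNodeMetrics and PrintPodMetrics emit column-aligned tables. Whether this printer uses text/tabwriter internally is not visible in these hunks, but the standard-library way to get that kind of alignment looks like this (the sample rows are invented):

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

func main() {
    // Column-aligned output of the kind `kubectl top` produces.
    w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
    fmt.Fprintln(w, "NAME\tCPU(cores)\tMEMORY(bytes)")
    fmt.Fprintln(w, "node1\t100m\t128Mi")
    fmt.Fprintln(w, "node2\t250m\t512Mi")
    w.Flush()
}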


@@ -19,20 +19,20 @@ package api
import (
"time"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
// RuntimeVersioner contains methods for runtime name, version and API version.
type RuntimeVersioner interface {
// Version returns the runtime name, runtime version and runtime API version
Version(apiVersion string) (*runtimeApi.VersionResponse, error)
Version(apiVersion string) (*runtimeapi.VersionResponse, error)
}
// ContainerManager contains methods to manipulate containers managed by a
// container runtime. The methods are thread-safe.
type ContainerManager interface {
// CreateContainer creates a new container in specified PodSandbox.
CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error)
CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
// StartContainer starts the container.
StartContainer(containerID string) error
// StopContainer stops a running container with a grace period (i.e., timeout).
@@ -40,16 +40,16 @@ type ContainerManager interface {
// RemoveContainer removes the container.
RemoveContainer(containerID string) error
// ListContainers lists all containers by filters.
ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error)
ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error)
// ContainerStatus returns the status of the container.
ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error)
ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error)
// ExecSync executes a command in the container, and returns the stdout output.
// If command exits with a non-zero exit code, an error is returned.
ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error)
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
Exec(*runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error)
Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error)
// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error)
Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error)
}
// PodSandboxManager contains methods for operating on PodSandboxes. The methods
@@ -57,7 +57,7 @@ type ContainerManager interface {
type PodSandboxManager interface {
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state.
RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error)
RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error)
// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
StopPodSandbox(podSandboxID string) error
@@ -65,11 +65,11 @@ type PodSandboxManager interface {
// sandbox, they should be forcibly removed.
RemovePodSandbox(podSandboxID string) error
// PodSandboxStatus returns the Status of the PodSandbox.
PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error)
PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error)
// ListPodSandbox returns a list of Sandbox.
ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error)
ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error)
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
PortForward(*runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error)
PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error)
}
// RuntimeService interface should be implemented by a container runtime.
@@ -80,9 +80,9 @@ type RuntimeService interface {
PodSandboxManager
// UpdateRuntimeConfig updates runtime configuration if specified
UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) error
UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error
// Status returns the status of the runtime.
Status() (*runtimeApi.RuntimeStatus, error)
Status() (*runtimeapi.RuntimeStatus, error)
}
// ImageManagerService interface should be implemented by a container image
@@ -90,11 +90,11 @@ type RuntimeService interface {
// The methods should be thread-safe.
type ImageManagerService interface {
// ListImages lists the existing images.
ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error)
ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error)
// ImageStatus returns the status of the image.
ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error)
ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error)
// PullImage pulls an image with the authentication config.
PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error
PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error
// RemoveImage removes the image.
RemoveImage(image *runtimeApi.ImageSpec) error
RemoveImage(image *runtimeapi.ImageSpec) error
}
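These interfaces are implemented both by real shims (the dockershim later in this diff) and by fakes in the testing package. A common Go idiom for keeping such implementations honest is a compile-time conformance check. A self-contained sketch, with the response type simplified to a plain struct and all names invented for illustration:

package main

import "fmt"

// Versioner mirrors the shape of RuntimeVersioner above.
type VersionResponse struct {
    Version, RuntimeName string
}

type Versioner interface {
    Version(apiVersion string) (*VersionResponse, error)
}

type fakeRuntime struct{}

func (fakeRuntime) Version(apiVersion string) (*VersionResponse, error) {
    return &VersionResponse{Version: apiVersion, RuntimeName: "fake"}, nil
}

// Compile-time check that fakeRuntime satisfies Versioner.
var _ Versioner = fakeRuntime{}

func main() {
    v, _ := fakeRuntime{}.Version("0.1.0")
    fmt.Println(v.RuntimeName, v.Version)
}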


@@ -19,7 +19,7 @@ package testing
import (
"sync"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
)
@@ -28,14 +28,14 @@ type FakeImageService struct {
FakeImageSize uint64
Called []string
Images map[string]*runtimeApi.Image
Images map[string]*runtimeapi.Image
}
func (r *FakeImageService) SetFakeImages(images []string) {
r.Lock()
defer r.Unlock()
r.Images = make(map[string]*runtimeApi.Image)
r.Images = make(map[string]*runtimeapi.Image)
for _, image := range images {
r.Images[image] = r.makeFakeImage(image)
}
@@ -51,25 +51,25 @@ func (r *FakeImageService) SetFakeImageSize(size uint64) {
func NewFakeImageService() *FakeImageService {
return &FakeImageService{
Called: make([]string, 0),
Images: make(map[string]*runtimeApi.Image),
Images: make(map[string]*runtimeapi.Image),
}
}
func (r *FakeImageService) makeFakeImage(image string) *runtimeApi.Image {
return &runtimeApi.Image{
func (r *FakeImageService) makeFakeImage(image string) *runtimeapi.Image {
return &runtimeapi.Image{
Id: &image,
Size_: &r.FakeImageSize,
RepoTags: []string{image},
}
}
func (r *FakeImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) {
func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
r.Lock()
defer r.Unlock()
r.Called = append(r.Called, "ListImages")
images := make([]*runtimeApi.Image, 0)
images := make([]*runtimeapi.Image, 0)
for _, img := range r.Images {
if filter != nil && filter.Image != nil {
if !sliceutils.StringInSlice(filter.Image.GetImage(), img.RepoTags) {
@@ -82,7 +82,7 @@ func (r *FakeImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtim
return images, nil
}
func (r *FakeImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) {
func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
r.Lock()
defer r.Unlock()
@@ -91,7 +91,7 @@ func (r *FakeImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi
return r.Images[image.GetImage()], nil
}
func (r *FakeImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error {
func (r *FakeImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
r.Lock()
defer r.Unlock()
@@ -107,7 +107,7 @@ func (r *FakeImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeA
return nil
}
func (r *FakeImageService) RemoveImage(image *runtimeApi.ImageSpec) error {
func (r *FakeImageService) RemoveImage(image *runtimeapi.ImageSpec) error {
r.Lock()
defer r.Unlock()
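The r.Lock()/r.Unlock() calls work because the fake presumably embeds sync.Mutex (the field falls outside these hunks), so the mutex methods are promoted onto the struct. A minimal sketch of that call-recording pattern, with FakeService invented for illustration:

package main

import (
    "fmt"
    "sync"
)

// FakeService embeds sync.Mutex, so Lock/Unlock are promoted methods,
// which is why code like the above can write r.Lock() directly.
type FakeService struct {
    sync.Mutex
    Called []string
}

func (f *FakeService) Do(name string) {
    f.Lock()
    defer f.Unlock()
    f.Called = append(f.Called, name)
}

func main() {
    f := &FakeService{}
    f.Do("ListImages")
    f.Do("RemoveImage")
    fmt.Println(f.Called) // [ListImages RemoveImage]
}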


@@ -22,7 +22,7 @@ import (
"sync"
"time"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
var (
@@ -34,12 +34,12 @@ var (
type FakePodSandbox struct {
// PodSandboxStatus contains the runtime information for a sandbox.
runtimeApi.PodSandboxStatus
runtimeapi.PodSandboxStatus
}
type FakeContainer struct {
// ContainerStatus contains the runtime information for a container.
runtimeApi.ContainerStatus
runtimeapi.ContainerStatus
// the sandbox id of this container
SandboxID string
@@ -50,7 +50,7 @@ type FakeRuntimeService struct {
Called []string
FakeStatus *runtimeApi.RuntimeStatus
FakeStatus *runtimeapi.RuntimeStatus
Containers map[string]*FakeContainer
Sandboxes map[string]*FakePodSandbox
}
@@ -96,13 +96,13 @@ func NewFakeRuntimeService() *FakeRuntimeService {
}
}
func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResponse, error) {
func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
r.Lock()
defer r.Unlock()
r.Called = append(r.Called, "Version")
return &runtimeApi.VersionResponse{
return &runtimeapi.VersionResponse{
Version: &version,
RuntimeName: &FakeRuntimeName,
RuntimeVersion: &version,
@@ -110,7 +110,7 @@ func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResp
}, nil
}
func (r *FakeRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) {
func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
r.Lock()
defer r.Unlock()
@@ -119,7 +119,7 @@ func (r *FakeRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) {
return r.FakeStatus, nil
}
func (r *FakeRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) {
func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
r.Lock()
defer r.Unlock()
@@ -129,14 +129,14 @@ func (r *FakeRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig)
// fixed name from BuildSandboxName() for easily making fake sandboxes.
podSandboxID := BuildSandboxName(config.Metadata)
createdAt := time.Now().Unix()
readyState := runtimeApi.PodSandboxState_SANDBOX_READY
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
r.Sandboxes[podSandboxID] = &FakePodSandbox{
PodSandboxStatus: runtimeApi.PodSandboxStatus{
PodSandboxStatus: runtimeapi.PodSandboxStatus{
Id: &podSandboxID,
Metadata: config.Metadata,
State: &readyState,
CreatedAt: &createdAt,
Network: &runtimeApi.PodSandboxNetworkStatus{
Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: &FakePodSandboxIP,
},
Labels: config.Labels,
@@ -153,7 +153,7 @@ func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error {
r.Called = append(r.Called, "StopPodSandbox")
notReadyState := runtimeApi.PodSandboxState_SANDBOX_NOTREADY
notReadyState := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
if s, ok := r.Sandboxes[podSandboxID]; ok {
s.State = &notReadyState
} else {
@@ -175,7 +175,7 @@ func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error {
return nil
}
func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error) {
func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
r.Lock()
defer r.Unlock()
@@ -190,13 +190,13 @@ func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeApi.
return &status, nil
}
func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) {
func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
r.Lock()
defer r.Unlock()
r.Called = append(r.Called, "ListPodSandbox")
result := make([]*runtimeApi.PodSandbox, 0)
result := make([]*runtimeapi.PodSandbox, 0)
for id, s := range r.Sandboxes {
if filter != nil {
if filter.Id != nil && filter.GetId() != id {
@@ -210,7 +210,7 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter)
}
}
result = append(result, &runtimeApi.PodSandbox{
result = append(result, &runtimeapi.PodSandbox{
Id: s.Id,
Metadata: s.Metadata,
State: s.State,
@@ -223,15 +223,15 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter)
return result, nil
}
func (r *FakeRuntimeService) PortForward(*runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) {
func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
r.Lock()
defer r.Unlock()
r.Called = append(r.Called, "PortForward")
return &runtimeApi.PortForwardResponse{}, nil
return &runtimeapi.PortForwardResponse{}, nil
}
func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) {
func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
r.Lock()
defer r.Unlock()
@@ -241,10 +241,10 @@ func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtim
// fixed BuildContainerName() for easily making fake containers.
containerID := BuildContainerName(config.Metadata, podSandboxID)
createdAt := time.Now().Unix()
createdState := runtimeApi.ContainerState_CONTAINER_CREATED
createdState := runtimeapi.ContainerState_CONTAINER_CREATED
imageRef := config.Image.GetImage()
r.Containers[containerID] = &FakeContainer{
ContainerStatus: runtimeApi.ContainerStatus{
ContainerStatus: runtimeapi.ContainerStatus{
Id: &containerID,
Metadata: config.Metadata,
Image: config.Image,
@@ -273,7 +273,7 @@ func (r *FakeRuntimeService) StartContainer(containerID string) error {
// Set container to running.
startedAt := time.Now().Unix()
runningState := runtimeApi.ContainerState_CONTAINER_RUNNING
runningState := runtimeapi.ContainerState_CONTAINER_RUNNING
c.State = &runningState
c.StartedAt = &startedAt
@@ -293,7 +293,7 @@ func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) er
// Set container to exited state.
finishedAt := time.Now().Unix()
exitedState := runtimeApi.ContainerState_CONTAINER_EXITED
exitedState := runtimeapi.ContainerState_CONTAINER_EXITED
c.State = &exitedState
c.FinishedAt = &finishedAt
@@ -312,13 +312,13 @@ func (r *FakeRuntimeService) RemoveContainer(containerID string) error {
return nil
}
func (r *FakeRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
r.Lock()
defer r.Unlock()
r.Called = append(r.Called, "ListContainers")
result := make([]*runtimeApi.Container, 0)
result := make([]*runtimeapi.Container, 0)
for _, s := range r.Containers {
if filter != nil {
if filter.Id != nil && filter.GetId() != s.GetId() {
@@ -335,7 +335,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter)
}
}
result = append(result, &runtimeApi.Container{
result = append(result, &runtimeapi.Container{
Id: s.Id,
CreatedAt: s.CreatedAt,
PodSandboxId: &s.SandboxID,
@@ -351,7 +351,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter)
return result, nil
}
func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) {
func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
r.Lock()
defer r.Unlock()
@@ -374,22 +374,22 @@ func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout
return nil, nil, nil
}
func (r *FakeRuntimeService) Exec(*runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) {
func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
r.Lock()
defer r.Unlock()
r.Called = append(r.Called, "Exec")
return &runtimeApi.ExecResponse{}, nil
return &runtimeapi.ExecResponse{}, nil
}
func (r *FakeRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) {
func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
r.Lock()
defer r.Unlock()
r.Called = append(r.Called, "Attach")
return &runtimeApi.AttachResponse{}, nil
return &runtimeapi.AttachResponse{}, nil
}
func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) error {
func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
return nil
}
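Expressions like Id: &containerID and Size_: &r.FakeImageSize throughout these fakes follow from the v1alpha1 CRI types being generated from proto2, where scalar fields are pointers. A tiny self-contained sketch; the Image struct and stringPtr helper here are stand-ins, not the generated types:

package main

import "fmt"

// stringPtr makes a string literal usable where a *string field is expected.
func stringPtr(s string) *string { return &s }

type Image struct {
    Id       *string // pointer field, proto2-style optional
    RepoTags []string
}

func main() {
    img := Image{Id: stringPtr("sha256:abc"), RepoTags: []string{"busybox:latest"}}
    fmt.Println(*img.Id, img.RepoTags)
}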


@@ -19,15 +19,15 @@ package testing
import (
"fmt"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
func BuildContainerName(metadata *runtimeApi.ContainerMetadata, sandboxID string) string {
func BuildContainerName(metadata *runtimeapi.ContainerMetadata, sandboxID string) string {
// include the sandbox ID to make the container ID unique.
return fmt.Sprintf("%s_%s_%d", sandboxID, metadata.GetName(), metadata.GetAttempt())
}
func BuildSandboxName(metadata *runtimeApi.PodSandboxMetadata) string {
func BuildSandboxName(metadata *runtimeapi.PodSandboxMetadata) string {
return fmt.Sprintf("%s_%s_%s_%d", metadata.GetName(), metadata.GetNamespace(), metadata.GetUid(), metadata.GetAttempt())
}


@@ -26,7 +26,7 @@ import (
"github.com/golang/glog"
"github.com/google/cadvisor/cache/memory"
cadvisorMetrics "github.com/google/cadvisor/container"
cadvisormetrics "github.com/google/cadvisor/container"
"github.com/google/cadvisor/events"
cadvisorfs "github.com/google/cadvisor/fs"
cadvisorhttp "github.com/google/cadvisor/http"
@@ -101,7 +101,7 @@ func New(port uint, runtime string, rootPath string) (Interface, error) {
}
// Create and start the cAdvisor container manager.
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisorMetrics.MetricSet{cadvisorMetrics.NetworkTcpUsageMetrics: struct{}{}}, http.DefaultClient)
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisormetrics.MetricSet{cadvisormetrics.NetworkTcpUsageMetrics: struct{}{}}, http.DefaultClient)
if err != nil {
return nil, err
}


@@ -17,12 +17,12 @@ limitations under the License.
package cadvisor
import (
cadvisorApi "github.com/google/cadvisor/info/v1"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
)
func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) v1.ResourceList {
func CapacityFromMachineInfo(info *cadvisorapi.MachineInfo) v1.ResourceList {
c := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
int64(info.NumCores*1000),


@@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
"k8s.io/kubernetes/pkg/runtime"
@@ -209,11 +209,11 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod
// This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete.
func SandboxToContainerState(state runtimeApi.PodSandboxState) ContainerState {
func SandboxToContainerState(state runtimeapi.PodSandboxState) ContainerState {
switch state {
case runtimeApi.PodSandboxState_SANDBOX_READY:
case runtimeapi.PodSandboxState_SANDBOX_READY:
return ContainerStateRunning
case runtimeApi.PodSandboxState_SANDBOX_NOTREADY:
case runtimeapi.PodSandboxState_SANDBOX_NOTREADY:
return ContainerStateExited
}
return ContainerStateUnknown


@@ -26,7 +26,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/term"
@@ -299,7 +299,7 @@ type PodStatus struct {
ContainerStatuses []*ContainerStatus
// Status of the pod sandbox.
// Only for kuberuntime now, other runtime may keep it nil.
SandboxStatuses []*runtimeApi.PodSandboxStatus
SandboxStatuses []*runtimeapi.PodSandboxStatus
}
// ContainerStatus represents the status of a container.


@@ -23,7 +23,7 @@ import (
dockertypes "github.com/docker/engine-api/types"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
// This file contains helper functions to convert docker API types to runtime
@@ -36,13 +36,13 @@ const (
statusExitedPrefix = "Exited"
)
func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeApi.Image, error) {
func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeapi.Image, error) {
if image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
}
size := uint64(image.VirtualSize)
return &runtimeApi.Image{
return &runtimeapi.Image{
Id: &image.ID,
RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests,
@@ -50,13 +50,13 @@ func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeApi.Image, error)
}, nil
}
func imageInspectToRuntimeAPIImage(image *dockertypes.ImageInspect) (*runtimeApi.Image, error) {
func imageInspectToRuntimeAPIImage(image *dockertypes.ImageInspect) (*runtimeapi.Image, error) {
if image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
}
size := uint64(image.VirtualSize)
runtimeImage := &runtimeApi.Image{
runtimeImage := &runtimeapi.Image{
Id: &image.ID,
RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests,
@@ -77,7 +77,7 @@ func toPullableImageID(id string, image *dockertypes.ImageInspect) string {
return imageID
}
func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeApi.Container, error) {
func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeapi.Container, error) {
state := toRuntimeAPIContainerState(c.Status)
if len(c.Names) == 0 {
return nil, fmt.Errorf("unexpected empty container name: %+v", c)
@@ -90,11 +90,11 @@ func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeApi.Container, err
sandboxID := c.Labels[sandboxIDLabelKey]
// The timestamp in dockertypes.Container is in seconds.
createdAt := c.Created * int64(time.Second)
return &runtimeApi.Container{
return &runtimeapi.Container{
Id: &c.ID,
PodSandboxId: &sandboxID,
Metadata: metadata,
Image: &runtimeApi.ImageSpec{Image: &c.Image},
Image: &runtimeapi.ImageSpec{Image: &c.Image},
ImageRef: &c.ImageID,
State: &state,
CreatedAt: &createdAt,
@@ -103,48 +103,48 @@ func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeApi.Container, err
}, nil
}
func toDockerContainerStatus(state runtimeApi.ContainerState) string {
func toDockerContainerStatus(state runtimeapi.ContainerState) string {
switch state {
case runtimeApi.ContainerState_CONTAINER_CREATED:
case runtimeapi.ContainerState_CONTAINER_CREATED:
return "created"
case runtimeApi.ContainerState_CONTAINER_RUNNING:
case runtimeapi.ContainerState_CONTAINER_RUNNING:
return "running"
case runtimeApi.ContainerState_CONTAINER_EXITED:
case runtimeapi.ContainerState_CONTAINER_EXITED:
return "exited"
case runtimeApi.ContainerState_CONTAINER_UNKNOWN:
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
fallthrough
default:
return "unknown"
}
}
func toRuntimeAPIContainerState(state string) runtimeApi.ContainerState {
func toRuntimeAPIContainerState(state string) runtimeapi.ContainerState {
// Parse the state string in dockertypes.Container. This could break when
// we upgrade docker.
switch {
case strings.HasPrefix(state, statusRunningPrefix):
return runtimeApi.ContainerState_CONTAINER_RUNNING
return runtimeapi.ContainerState_CONTAINER_RUNNING
case strings.HasPrefix(state, statusExitedPrefix):
return runtimeApi.ContainerState_CONTAINER_EXITED
return runtimeapi.ContainerState_CONTAINER_EXITED
case strings.HasPrefix(state, statusCreatedPrefix):
return runtimeApi.ContainerState_CONTAINER_CREATED
return runtimeapi.ContainerState_CONTAINER_CREATED
default:
return runtimeApi.ContainerState_CONTAINER_UNKNOWN
return runtimeapi.ContainerState_CONTAINER_UNKNOWN
}
}
func toRuntimeAPISandboxState(state string) runtimeApi.PodSandboxState {
func toRuntimeAPISandboxState(state string) runtimeapi.PodSandboxState {
// Parse the state string in dockertypes.Container. This could break when
// we upgrade docker.
switch {
case strings.HasPrefix(state, statusRunningPrefix):
return runtimeApi.PodSandboxState_SANDBOX_READY
return runtimeapi.PodSandboxState_SANDBOX_READY
default:
return runtimeApi.PodSandboxState_SANDBOX_NOTREADY
return runtimeapi.PodSandboxState_SANDBOX_NOTREADY
}
}
func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeApi.PodSandbox, error) {
func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeapi.PodSandbox, error) {
state := toRuntimeAPISandboxState(c.Status)
if len(c.Names) == 0 {
return nil, fmt.Errorf("unexpected empty sandbox name: %+v", c)
@@ -156,7 +156,7 @@ func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeApi.PodSandbox, erro
labels, annotations := extractLabels(c.Labels)
// The timestamp in dockertypes.Container is in seconds.
createdAt := c.Created * int64(time.Second)
return &runtimeApi.PodSandbox{
return &runtimeapi.PodSandbox{
Id: &c.ID,
Metadata: metadata,
State: &state,
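The conversion helpers above classify containers by matching docker's human-readable status strings against known prefixes. Stripped of the runtime API types, the technique is just this (parseState and its return strings are simplified stand-ins):

package main

import (
    "fmt"
    "strings"
)

// parseState works in the same spirit as toRuntimeAPIContainerState above:
// docker reports free-form status text, so the shim matches well-known prefixes.
func parseState(status string) string {
    switch {
    case strings.HasPrefix(status, "Up"):
        return "running"
    case strings.HasPrefix(status, "Exited"):
        return "exited"
    case strings.HasPrefix(status, "Created"):
        return "created"
    default:
        return "unknown"
    }
}

func main() {
    for _, s := range []string{"Up 5 hours", "Exited (0) 2 hours ago", "Created", "???"} {
        fmt.Printf("%-24q -> %s\n", s, parseState(s))
    }
}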


@@ -22,18 +22,18 @@ import (
dockertypes "github.com/docker/engine-api/types"
"github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
func TestConvertDockerStatusToRuntimeAPIState(t *testing.T) {
testCases := []struct {
input string
expected runtimeApi.ContainerState
expected runtimeapi.ContainerState
}{
{input: "Up 5 hours", expected: runtimeApi.ContainerState_CONTAINER_RUNNING},
{input: "Exited (0) 2 hours ago", expected: runtimeApi.ContainerState_CONTAINER_EXITED},
{input: "Created", expected: runtimeApi.ContainerState_CONTAINER_CREATED},
{input: "Random string", expected: runtimeApi.ContainerState_CONTAINER_UNKNOWN},
{input: "Up 5 hours", expected: runtimeapi.ContainerState_CONTAINER_RUNNING},
{input: "Exited (0) 2 hours ago", expected: runtimeapi.ContainerState_CONTAINER_EXITED},
{input: "Created", expected: runtimeapi.ContainerState_CONTAINER_CREATED},
{input: "Random string", expected: runtimeapi.ContainerState_CONTAINER_UNKNOWN},
}
for _, test := range testCases {
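The test above is table-driven: a slice of input/expected pairs and one assertion loop. In self-contained form, with the parser reduced to a local stand-in rather than the real toRuntimeAPIContainerState:

package conversions_test

import (
    "strings"
    "testing"
)

// parseState is a simplified stand-in for the docker-status parser.
func parseState(status string) string {
    if strings.HasPrefix(status, "Up") {
        return "running"
    }
    return "unknown"
}

func TestParseState(t *testing.T) {
    testCases := []struct {
        input, expected string
    }{
        {input: "Up 5 hours", expected: "running"},
        {input: "Random string", expected: "unknown"},
    }
    for _, test := range testCases {
        if got := parseState(test.input); got != test.expected {
            t.Errorf("parseState(%q) = %q, want %q", test.input, got, test.expected)
        }
    }
}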


@@ -28,12 +28,12 @@ import (
dockerstrslice "github.com/docker/engine-api/types/strslice"
"github.com/golang/glog"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
)
// ListContainers lists all containers matching the filter.
func (ds *dockerService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
opts := dockertypes.ContainerListOptions{All: true}
opts.Filter = dockerfilters.NewArgs()
@@ -63,7 +63,7 @@ func (ds *dockerService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*
return nil, err
}
// Convert docker to runtime api containers.
result := []*runtimeApi.Container{}
result := []*runtimeapi.Container{}
for i := range containers {
c := containers[i]
@@ -82,7 +82,7 @@ func (ds *dockerService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*
// Docker cannot store the log to an arbitrary location (yet), so we create a
// symlink at LogPath, linking to the actual path of the log.
// TODO: check if the default values returned by the runtime API are ok.
func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) {
func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
if config == nil {
return "", fmt.Errorf("container config is nil")
}
@@ -283,7 +283,7 @@ func getContainerTimestamps(r *dockertypes.ContainerJSON) (time.Time, time.Time,
}
// ContainerStatus inspects the docker container and returns the status.
func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) {
func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
r, err := ds.client.InspectContainer(containerID)
if err != nil {
return nil, err
@@ -303,11 +303,11 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
imageID := toPullableImageID(r.Image, ir)
// Convert the mounts.
mounts := []*runtimeApi.Mount{}
mounts := []*runtimeapi.Mount{}
for i := range r.Mounts {
m := r.Mounts[i]
readonly := !m.RW
mounts = append(mounts, &runtimeApi.Mount{
mounts = append(mounts, &runtimeapi.Mount{
HostPath: &m.Source,
ContainerPath: &m.Destination,
Readonly: &readonly,
@@ -315,11 +315,11 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
})
}
// Interpret container states.
var state runtimeApi.ContainerState
var state runtimeapi.ContainerState
var reason, message string
if r.State.Running {
// Container is running.
state = runtimeApi.ContainerState_CONTAINER_RUNNING
state = runtimeapi.ContainerState_CONTAINER_RUNNING
} else {
// Container is *not* running. We need to get more details.
// * Case 1: container has run and exited with non-zero finishedAt
@@ -328,7 +328,7 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
// time, but a non-zero exit code.
// * Case 3: container has been created, but not started (yet).
if !finishedAt.IsZero() { // Case 1
state = runtimeApi.ContainerState_CONTAINER_EXITED
state = runtimeapi.ContainerState_CONTAINER_EXITED
switch {
case r.State.OOMKilled:
// TODO: consider exposing OOMKilled via the runtimeAPI.
@ -341,13 +341,13 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
reason = "Error"
}
} else if r.State.ExitCode != 0 { // Case 2
state = runtimeApi.ContainerState_CONTAINER_EXITED
state = runtimeapi.ContainerState_CONTAINER_EXITED
// Adjust finishedAt and startedAt time to createdAt time to avoid
// confusion.
finishedAt, startedAt = createdAt, createdAt
reason = "ContainerCannotRun"
} else { // Case 3
state = runtimeApi.ContainerState_CONTAINER_CREATED
state = runtimeapi.ContainerState_CONTAINER_CREATED
}
message = r.State.Error
}
@@ -362,10 +362,10 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
}
labels, annotations := extractLabels(r.Config.Labels)
return &runtimeApi.ContainerStatus{
return &runtimeapi.ContainerStatus{
Id: &r.ID,
Metadata: metadata,
Image: &runtimeApi.ImageSpec{Image: &r.Config.Image},
Image: &runtimeapi.ImageSpec{Image: &r.Config.Image},
ImageRef: &imageID,
Mounts: mounts,
ExitCode: &exitCode,


@@ -24,18 +24,18 @@ import (
"github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
// A helper to create a basic config.
func makeContainerConfig(sConfig *runtimeApi.PodSandboxConfig, name, image string, attempt uint32, labels, annotations map[string]string) *runtimeApi.ContainerConfig {
return &runtimeApi.ContainerConfig{
Metadata: &runtimeApi.ContainerMetadata{
func makeContainerConfig(sConfig *runtimeapi.PodSandboxConfig, name, image string, attempt uint32, labels, annotations map[string]string) *runtimeapi.ContainerConfig {
return &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
Name: &name,
Attempt: &attempt,
},
Image: &runtimeApi.ImageSpec{Image: &image},
Image: &runtimeapi.ImageSpec{Image: &image},
Labels: labels,
Annotations: annotations,
}
@@ -48,8 +48,8 @@ func TestListContainers(t *testing.T) {
podName, namespace := "foo", "bar"
containerName, image := "sidecar", "logger"
configs := []*runtimeApi.ContainerConfig{}
sConfigs := []*runtimeApi.PodSandboxConfig{}
configs := []*runtimeapi.ContainerConfig{}
sConfigs := []*runtimeapi.PodSandboxConfig{}
for i := 0; i < 3; i++ {
s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0)
@@ -61,8 +61,8 @@ func TestListContainers(t *testing.T) {
configs = append(configs, c)
}
expected := []*runtimeApi.Container{}
state := runtimeApi.ContainerState_CONTAINER_RUNNING
expected := []*runtimeapi.Container{}
state := runtimeapi.ContainerState_CONTAINER_RUNNING
var createdAt int64 = 0
for i := range configs {
// We don't care about the sandbox id; pass a bogus one.
@@ -75,7 +75,7 @@ func TestListContainers(t *testing.T) {
imageRef := "" // FakeDockerClient doesn't populate ImageRef yet.
// Prepend to the expected list because ListContainers returns
// the most recent containers first.
expected = append([]*runtimeApi.Container{{
expected = append([]*runtimeapi.Container{{
Metadata: configs[i].Metadata,
Id: &id,
PodSandboxId: &sandboxID,
@@ -105,13 +105,13 @@ func TestContainerStatus(t *testing.T) {
var defaultTime time.Time
dt := defaultTime.UnixNano()
ct, st, ft := dt, dt, dt
state := runtimeApi.ContainerState_CONTAINER_CREATED
state := runtimeapi.ContainerState_CONTAINER_CREATED
// The following variables are not set in FakeDockerClient.
imageRef := DockerImageIDPrefix + ""
exitCode := int32(0)
var reason, message string
expected := &runtimeApi.ContainerStatus{
expected := &runtimeapi.ContainerStatus{
State: &state,
CreatedAt: &ct,
StartedAt: &st,
@@ -122,7 +122,7 @@ func TestContainerStatus(t *testing.T) {
ExitCode: &exitCode,
Reason: &reason,
Message: &message,
Mounts: []*runtimeApi.Mount{},
Mounts: []*runtimeapi.Mount{},
Labels: config.Labels,
Annotations: config.Annotations,
}
@@ -149,7 +149,7 @@ func TestContainerStatus(t *testing.T) {
// Advance the clock and start the container.
fClock.SetTime(time.Now())
*expected.StartedAt = fClock.Now().UnixNano()
*expected.State = runtimeApi.ContainerState_CONTAINER_RUNNING
*expected.State = runtimeapi.ContainerState_CONTAINER_RUNNING
err = ds.StartContainer(id)
assert.NoError(t, err)
@ -159,7 +159,7 @@ func TestContainerStatus(t *testing.T) {
// Advance the clock and stop the container.
fClock.SetTime(time.Now().Add(1 * time.Hour))
*expected.FinishedAt = fClock.Now().UnixNano()
*expected.State = runtimeApi.ContainerState_CONTAINER_EXITED
*expected.State = runtimeapi.ContainerState_CONTAINER_EXITED
*expected.Reason = "Completed"
err = ds.StopContainer(id, 0)


@@ -18,14 +18,14 @@ package dockershim
import (
dockertypes "github.com/docker/engine-api/types"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
)
// This file implements methods in ImageManagerService.
// ListImages lists existing images.
func (ds *dockerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) {
func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
opts := dockertypes.ImageListOptions{}
if filter != nil {
if imgSpec := filter.GetImage(); imgSpec != nil {
@@ -38,7 +38,7 @@ func (ds *dockerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeA
return nil, err
}
result := []*runtimeApi.Image{}
result := []*runtimeapi.Image{}
for _, i := range images {
apiImage, err := imageToRuntimeAPIImage(&i)
if err != nil {
@@ -51,7 +51,7 @@ func (ds *dockerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeA
}
// ImageStatus returns the status of the image; it returns nil if the image isn't present.
func (ds *dockerService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) {
func (ds *dockerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
imageInspect, err := ds.client.InspectImageByRef(image.GetImage())
if err != nil {
if dockertools.IsImageNotFoundError(err) {
@@ -63,7 +63,7 @@ func (ds *dockerService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.I
}
// PullImage pulls an image with authentication config.
func (ds *dockerService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error {
func (ds *dockerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
return ds.client.PullImage(image.GetImage(),
dockertypes.AuthConfig{
Username: auth.GetUsername(),
@@ -77,7 +77,7 @@ func (ds *dockerService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi
}
// RemoveImage removes the image.
func (ds *dockerService) RemoveImage(image *runtimeApi.ImageSpec) error {
func (ds *dockerService) RemoveImage(image *runtimeapi.ImageSpec) error {
// If the image has multiple tags, we need to remove all the tags
// TODO: We assume image.Image is image ID here, which is true in the current implementation
// of kubelet, but we should still clarify this in CRI.


@@ -21,7 +21,7 @@ import (
dockertypes "github.com/docker/engine-api/types"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
)
@@ -29,7 +29,7 @@ func TestRemoveImage(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService()
id := "1111"
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo"}}
ds.RemoveImage(&runtimeApi.ImageSpec{Image: &id})
ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
dockertools.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}}))
}
@@ -38,7 +38,7 @@ func TestRemoveImageWithMultipleTags(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService()
id := "1111"
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo", "bar"}}
ds.RemoveImage(&runtimeApi.ImageSpec{Image: &id})
ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
dockertools.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
dockertools.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}}))


@@ -24,7 +24,7 @@ import (
dockerfilters "github.com/docker/engine-api/types/filters"
"github.com/golang/glog"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/types"
@@ -48,7 +48,7 @@ const (
// For docker, PodSandbox is implemented by a container holding the network
// namespace for the pod.
// Note: docker doesn't use LogDirectory (yet).
func (ds *dockerService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) {
func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
// Step 1: Pull the image for the sandbox.
image := defaultSandboxImage
podSandboxImage := ds.podSandboxImage
@@ -179,7 +179,7 @@ func (ds *dockerService) getIP(sandbox *dockertypes.ContainerJSON) (string, erro
}
// PodSandboxStatus returns the status of the PodSandbox.
func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error) {
func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
// Inspect the container.
r, err := ds.client.InspectContainer(podSandboxID)
if err != nil {
@ -194,15 +194,15 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
ct := createdAt.UnixNano()
// Translate container to sandbox state.
state := runtimeApi.PodSandboxState_SANDBOX_NOTREADY
state := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
if r.State.Running {
state = runtimeApi.PodSandboxState_SANDBOX_READY
state = runtimeapi.PodSandboxState_SANDBOX_READY
}
IP, err := ds.getIP(r)
if err != nil {
return nil, err
}
network := &runtimeApi.PodSandboxNetworkStatus{Ip: &IP}
network := &runtimeapi.PodSandboxNetworkStatus{Ip: &IP}
netNS := getNetworkNamespace(r)
metadata, err := parseSandboxName(r.Name)
@ -211,7 +211,7 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
}
hostNetwork := sharesHostNetwork(r)
labels, annotations := extractLabels(r.Config.Labels)
return &runtimeApi.PodSandboxStatus{
return &runtimeapi.PodSandboxStatus{
Id: &r.ID,
State: &state,
CreatedAt: &ct,
@ -219,10 +219,10 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
Labels: labels,
Annotations: annotations,
Network: network,
Linux: &runtimeApi.LinuxPodSandboxStatus{
Namespaces: &runtimeApi.Namespace{
Linux: &runtimeapi.LinuxPodSandboxStatus{
Namespaces: &runtimeapi.Namespace{
Network: &netNS,
Options: &runtimeApi.NamespaceOption{
Options: &runtimeapi.NamespaceOption{
HostNetwork: &hostNetwork,
},
},
@ -231,7 +231,7 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
}
// ListPodSandbox returns a list of Sandboxes.
func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) {
func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
// By default, list all containers whether they are running or not.
opts := dockertypes.ContainerListOptions{All: true}
filterOutReadySandboxes := false
@ -246,11 +246,11 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
f.Add("id", filter.GetId())
}
if filter.State != nil {
if filter.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY {
if filter.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
// Only list running containers.
opts.All = false
} else {
// runtimeApi.PodSandboxState_SANDBOX_NOTREADY can mean the
// runtimeapi.PodSandboxState_SANDBOX_NOTREADY can mean the
// container is in any of the non-running states (e.g., created,
// exited). We can't tell docker to filter out running
// containers directly, so we'll need to filter them out
@ -271,7 +271,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
}
// Convert docker containers to runtime api sandboxes.
result := []*runtimeApi.PodSandbox{}
result := []*runtimeapi.PodSandbox{}
for i := range containers {
c := containers[i]
converted, err := toRuntimeAPISandbox(&c)
@ -279,7 +279,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
glog.V(4).Infof("Unable to convert docker to runtime API sandbox: %v", err)
continue
}
if filterOutReadySandboxes && converted.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY {
if filterOutReadySandboxes && converted.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
continue
}
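
Since docker's list filter has no way to ask for "anything but running", the pattern above lists all containers and drops the ready sandboxes afterwards; a self-contained sketch of that client-side filter (types simplified):

package main

import "fmt"

// sandbox is a pared-down stand-in for runtimeapi.PodSandbox.
type sandbox struct {
	id    string
	ready bool
}

// filterOutReady mirrors the loop above: docker's list filter cannot
// express "not running", so everything is listed and ready sandboxes
// are skipped after the fact.
func filterOutReady(all []sandbox) []sandbox {
	notReady := []sandbox{}
	for _, s := range all {
		if s.ready {
			continue
		}
		notReady = append(notReady, s)
	}
	return notReady
}

func main() {
	all := []sandbox{{"a", true}, {"b", false}}
	fmt.Println(filterOutReady(all)) // [{b false}]
}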
@ -289,7 +289,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
}
// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeApi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error {
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error {
// Apply Cgroup options.
// TODO: Check if this works with per-pod cgroups.
hc.CgroupParent = lc.GetCgroupParent()
@ -299,8 +299,8 @@ func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig
return nil
}
// makeSandboxDockerConfig returns dockertypes.ContainerCreateConfig based on runtimeApi.PodSandboxConfig.
func (ds *dockerService) makeSandboxDockerConfig(c *runtimeApi.PodSandboxConfig, image string) (*dockertypes.ContainerCreateConfig, error) {
// makeSandboxDockerConfig returns dockertypes.ContainerCreateConfig based on runtimeapi.PodSandboxConfig.
func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, image string) (*dockertypes.ContainerCreateConfig, error) {
// Merge annotations and labels because docker supports only labels.
labels := makeLabels(c.GetLabels(), c.GetAnnotations())
// Apply a label to distinguish sandboxes from regular containers.
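
A small sketch of the label/annotation merge that makeLabels performs, per the comment above; the "annotation." prefix here is an assumption for illustration, as the real prefix constant is defined outside this diff:

package main

import "fmt"

// mergeLabels is a simplified sketch: annotations are folded into the
// label map under a distinguishing prefix so the two maps can be split
// apart again when the container is read back.
func mergeLabels(labels, annotations map[string]string) map[string]string {
	merged := map[string]string{}
	for k, v := range labels {
		merged[k] = v
	}
	for k, v := range annotations {
		merged["annotation."+k] = v
	}
	return merged
}

func main() {
	fmt.Println(mergeLabels(
		map[string]string{"app": "web"},
		map[string]string{"owner": "team-a"},
	)) // map[annotation.owner:team-a app:web]
}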

View File

@ -24,19 +24,19 @@ import (
"github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
)
// A helper to create a basic config.
func makeSandboxConfig(name, namespace, uid string, attempt uint32) *runtimeApi.PodSandboxConfig {
func makeSandboxConfig(name, namespace, uid string, attempt uint32) *runtimeapi.PodSandboxConfig {
return makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid, attempt, map[string]string{}, map[string]string{})
}
func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, attempt uint32, labels, annotations map[string]string) *runtimeApi.PodSandboxConfig {
return &runtimeApi.PodSandboxConfig{
Metadata: &runtimeApi.PodSandboxMetadata{
func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, attempt uint32, labels, annotations map[string]string) *runtimeapi.PodSandboxConfig {
return &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: &name,
Namespace: &namespace,
Uid: &uid,
@ -52,7 +52,7 @@ func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, atte
func TestListSandboxes(t *testing.T) {
ds, _, _ := newTestDockerService()
name, namespace := "foo", "bar"
configs := []*runtimeApi.PodSandboxConfig{}
configs := []*runtimeapi.PodSandboxConfig{}
for i := 0; i < 3; i++ {
c := makeSandboxConfigWithLabelsAndAnnotations(fmt.Sprintf("%s%d", name, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0,
@ -62,15 +62,15 @@ func TestListSandboxes(t *testing.T) {
configs = append(configs, c)
}
expected := []*runtimeApi.PodSandbox{}
state := runtimeApi.PodSandboxState_SANDBOX_READY
expected := []*runtimeapi.PodSandbox{}
state := runtimeapi.PodSandboxState_SANDBOX_READY
var createdAt int64 = 0
for i := range configs {
id, err := ds.RunPodSandbox(configs[i])
assert.NoError(t, err)
// Prepend to the expected list because ListPodSandbox returns
// the most recent sandbox first.
expected = append([]*runtimeApi.PodSandbox{{
expected = append([]*runtimeapi.PodSandbox{{
Metadata: configs[i].Metadata,
Id: &id,
State: &state,
@ -98,15 +98,15 @@ func TestSandboxStatus(t *testing.T) {
fakeIP := "2.3.4.5"
fakeNS := fmt.Sprintf("/proc/%d/ns/net", os.Getpid())
state := runtimeApi.PodSandboxState_SANDBOX_READY
state := runtimeapi.PodSandboxState_SANDBOX_READY
ct := int64(0)
hostNetwork := false
expected := &runtimeApi.PodSandboxStatus{
expected := &runtimeapi.PodSandboxStatus{
State: &state,
CreatedAt: &ct,
Metadata: config.Metadata,
Network: &runtimeApi.PodSandboxNetworkStatus{Ip: &fakeIP},
Linux: &runtimeApi.LinuxPodSandboxStatus{Namespaces: &runtimeApi.Namespace{Network: &fakeNS, Options: &runtimeApi.NamespaceOption{HostNetwork: &hostNetwork}}},
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: &fakeIP},
Linux: &runtimeapi.LinuxPodSandboxStatus{Namespaces: &runtimeapi.Namespace{Network: &fakeNS, Options: &runtimeapi.NamespaceOption{HostNetwork: &hostNetwork}}},
Labels: labels,
Annotations: annotations,
}
@ -128,7 +128,7 @@ func TestSandboxStatus(t *testing.T) {
assert.Equal(t, expected, status)
// Stop the sandbox.
*expected.State = runtimeApi.PodSandboxState_SANDBOX_NOTREADY
*expected.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
err = ds.StopPodSandbox(id)
assert.NoError(t, err)
status, err = ds.PodSandboxStatus(id)
@ -186,9 +186,9 @@ func TestHostNetworkPluginInvocation(t *testing.T) {
map[string]string{"annotation": ns},
)
hostNetwork := true
c.Linux = &runtimeApi.LinuxPodSandboxConfig{
SecurityContext: &runtimeApi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeApi.NamespaceOption{
c.Linux = &runtimeapi.LinuxPodSandboxConfig{
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeapi.NamespaceOption{
HostNetwork: &hostNetwork,
},
},

View File

@ -24,8 +24,8 @@ import (
"github.com/golang/protobuf/proto"
"k8s.io/kubernetes/pkg/apis/componentconfig"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/cm"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
@ -141,8 +141,8 @@ func NewDockerService(client dockertools.DockerInterface, seccompProfileRoot str
// DockerService is an interface that embeds the new RuntimeService and
// ImageService interfaces.
type DockerService interface {
internalApi.RuntimeService
internalApi.ImageManagerService
internalapi.RuntimeService
internalapi.ImageManagerService
Start() error
// For serving streaming calls.
http.Handler
@ -160,7 +160,7 @@ type dockerService struct {
}
// Version returns the runtime name, runtime version and runtime API version
func (ds *dockerService) Version(_ string) (*runtimeApi.VersionResponse, error) {
func (ds *dockerService) Version(_ string) (*runtimeapi.VersionResponse, error) {
v, err := ds.client.Version()
if err != nil {
return nil, fmt.Errorf("docker: failed to get docker version: %v", err)
@ -170,7 +170,7 @@ func (ds *dockerService) Version(_ string) (*runtimeApi.VersionResponse, error)
// Docker API version (e.g., 1.23) is not semver compatible. Add a ".0"
// suffix to remedy this.
apiVersion := fmt.Sprintf("%s.0", v.APIVersion)
return &runtimeApi.VersionResponse{
return &runtimeapi.VersionResponse{
Version: &runtimeAPIVersion,
RuntimeName: &name,
RuntimeVersion: &v.Version,
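
To illustrate the ".0" remedy mentioned above: semver requires MAJOR.MINOR.PATCH, so a two-component docker API version such as "1.23" is padded before comparison. A trivial standalone sketch (helper name hypothetical):

package main

import "fmt"

// padDockerAPIVersion mimics the fix-up above: docker reports API versions
// such as "1.23" (MAJOR.MINOR only), while semver expects MAJOR.MINOR.PATCH,
// so a ".0" patch component is appended.
func padDockerAPIVersion(v string) string {
	return fmt.Sprintf("%s.0", v)
}

func main() {
	fmt.Println(padDockerAPIVersion("1.23")) // 1.23.0
}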
@ -179,7 +179,7 @@ func (ds *dockerService) Version(_ string) (*runtimeApi.VersionResponse, error)
}
// UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates.
func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) (err error) {
func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) (err error) {
if runtimeConfig == nil {
return
}
@ -224,16 +224,16 @@ func (ds *dockerService) Start() error {
// Status returns the status of the runtime.
// TODO(random-liu): Set network condition accordingly here.
func (ds *dockerService) Status() (*runtimeApi.RuntimeStatus, error) {
runtimeReady := &runtimeApi.RuntimeCondition{
Type: proto.String(runtimeApi.RuntimeReady),
func (ds *dockerService) Status() (*runtimeapi.RuntimeStatus, error) {
runtimeReady := &runtimeapi.RuntimeCondition{
Type: proto.String(runtimeapi.RuntimeReady),
Status: proto.Bool(true),
}
networkReady := &runtimeApi.RuntimeCondition{
Type: proto.String(runtimeApi.NetworkReady),
networkReady := &runtimeapi.RuntimeCondition{
Type: proto.String(runtimeapi.NetworkReady),
Status: proto.Bool(true),
}
conditions := []*runtimeApi.RuntimeCondition{runtimeReady, networkReady}
conditions := []*runtimeapi.RuntimeCondition{runtimeReady, networkReady}
if _, err := ds.client.Version(); err != nil {
runtimeReady.Status = proto.Bool(false)
runtimeReady.Reason = proto.String("DockerDaemonNotReady")
@ -244,7 +244,7 @@ func (ds *dockerService) Status() (*runtimeApi.RuntimeStatus, error) {
networkReady.Reason = proto.String("NetworkPluginNotReady")
networkReady.Message = proto.String(fmt.Sprintf("docker: network plugin is not ready: %v", err))
}
return &runtimeApi.RuntimeStatus{Conditions: conditions}, nil
return &runtimeapi.RuntimeStatus{Conditions: conditions}, nil
}
func (ds *dockerService) ServeHTTP(w http.ResponseWriter, r *http.Request) {

View File

@ -24,7 +24,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/network"
@ -48,7 +48,7 @@ func newTestDockerService() (*dockerService, *dockertools.FakeDockerClient, *clo
func TestStatus(t *testing.T) {
ds, fDocker, _ := newTestDockerService()
assertStatus := func(expected map[string]bool, status *runtimeApi.RuntimeStatus) {
assertStatus := func(expected map[string]bool, status *runtimeapi.RuntimeStatus) {
conditions := status.GetConditions()
assert.Equal(t, len(expected), len(conditions))
for k, v := range expected {
@ -64,8 +64,8 @@ func TestStatus(t *testing.T) {
status, err := ds.Status()
assert.NoError(t, err)
assertStatus(map[string]bool{
runtimeApi.RuntimeReady: true,
runtimeApi.NetworkReady: true,
runtimeapi.RuntimeReady: true,
runtimeapi.NetworkReady: true,
}, status)
// Should not report ready status if version returns an error.
@ -73,8 +73,8 @@ func TestStatus(t *testing.T) {
status, err = ds.Status()
assert.NoError(t, err)
assertStatus(map[string]bool{
runtimeApi.RuntimeReady: false,
runtimeApi.NetworkReady: true,
runtimeapi.RuntimeReady: false,
runtimeapi.NetworkReady: true,
}, status)
// Should not report ready status if the network plugin returns an error.
@ -85,7 +85,7 @@ func TestStatus(t *testing.T) {
status, err = ds.Status()
assert.NoError(t, err)
assertStatus(map[string]bool{
runtimeApi.RuntimeReady: true,
runtimeApi.NetworkReady: false,
runtimeapi.RuntimeReady: true,
runtimeapi.NetworkReady: false,
}, status)
}

View File

@ -29,7 +29,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/types"
)
@ -62,7 +62,7 @@ func (v apiVersion) Compare(other string) (int, error) {
// generateEnvList converts KeyValue list to a list of strings, in the form of
// '<key>=<value>', which can be understood by docker.
func generateEnvList(envs []*runtimeApi.KeyValue) (result []string) {
func generateEnvList(envs []*runtimeapi.KeyValue) (result []string) {
for _, env := range envs {
result = append(result, fmt.Sprintf("%s=%s", env.GetKey(), env.GetValue()))
}
@ -127,7 +127,7 @@ func extractLabels(input map[string]string) (map[string]string, map[string]strin
// '<HostPath>:<ContainerPath>:ro', if the path is read only, or
// '<HostPath>:<ContainerPath>:Z', if the volume requires SELinux
// relabeling and the pod provides an SELinux label
func generateMountBindings(mounts []*runtimeApi.Mount) (result []string) {
func generateMountBindings(mounts []*runtimeapi.Mount) (result []string) {
for _, m := range mounts {
bind := fmt.Sprintf("%s:%s", m.GetHostPath(), m.GetContainerPath())
readOnly := m.GetReadonly()
@ -150,7 +150,7 @@ func generateMountBindings(mounts []*runtimeApi.Mount) (result []string) {
return
}
func makePortsAndBindings(pm []*runtimeApi.PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) {
func makePortsAndBindings(pm []*runtimeapi.PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) {
exposedPorts := map[dockernat.Port]struct{}{}
portBindings := map[dockernat.Port][]dockernat.PortBinding{}
for _, port := range pm {
@ -198,7 +198,7 @@ func makePortsAndBindings(pm []*runtimeApi.PortMapping) (map[dockernat.Port]stru
// getContainerSecurityOpts gets container security options from container and sandbox config, currently from sandbox
// annotations.
// It is an experimental feature and may be promoted to official runtime api in the future.
func getContainerSecurityOpts(containerName string, sandboxConfig *runtimeApi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
func getContainerSecurityOpts(containerName string, sandboxConfig *runtimeapi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
appArmorOpts, err := dockertools.GetAppArmorOpts(sandboxConfig.GetAnnotations(), containerName)
if err != nil {
return nil, err
@ -216,7 +216,7 @@ func getContainerSecurityOpts(containerName string, sandboxConfig *runtimeApi.Po
return opts, nil
}
func getSandboxSecurityOpts(sandboxConfig *runtimeApi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
func getSandboxSecurityOpts(sandboxConfig *runtimeapi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
// sandboxContainerName doesn't exist in the pod, so pod security options will be returned by default.
return getContainerSecurityOpts(sandboxContainerName, sandboxConfig, seccompProfileRoot)
}
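
The two helpers documented above produce docker's plain string formats: '<key>=<value>' for environment variables and '<HostPath>:<ContainerPath>' with an optional ':ro' or ':Z' suffix for binds. A compact, self-contained sketch of both (simplified; the real helpers handle more flag combinations):

package main

import "fmt"

// envString renders one env var the way docker expects it.
func envString(key, value string) string {
	return fmt.Sprintf("%s=%s", key, value)
}

// bindString renders one volume bind; readOnly appends ":ro" and
// selinuxRelabel appends ":Z", matching the formats described above.
func bindString(hostPath, containerPath string, readOnly, selinuxRelabel bool) string {
	bind := fmt.Sprintf("%s:%s", hostPath, containerPath)
	switch {
	case readOnly:
		bind += ":ro"
	case selinuxRelabel:
		bind += ":Z"
	}
	return bind
}

func main() {
	fmt.Println(envString("PATH", "/usr/bin"))                 // PATH=/usr/bin
	fmt.Println(bindString("/data", "/var/data", true, false)) // /data:/var/data:ro
}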

View File

@ -23,7 +23,7 @@ import (
"github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/security/apparmor"
)
@ -43,13 +43,13 @@ func TestLabelsAndAnnotationsRoundTrip(t *testing.T) {
// TODO: Migrate the corresponding test to dockershim.
func TestGetContainerSecurityOpts(t *testing.T) {
containerName := "bar"
makeConfig := func(annotations map[string]string) *runtimeApi.PodSandboxConfig {
makeConfig := func(annotations map[string]string) *runtimeapi.PodSandboxConfig {
return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations)
}
tests := []struct {
msg string
config *runtimeApi.PodSandboxConfig
config *runtimeapi.PodSandboxConfig
expectedOpts []string
}{{
msg: "No security annotations",
@ -106,13 +106,13 @@ func TestGetContainerSecurityOpts(t *testing.T) {
// TestGetSandboxSecurityOpts tests the logic of generating sandbox security options from sandbox annotations.
func TestGetSandboxSecurityOpts(t *testing.T) {
makeConfig := func(annotations map[string]string) *runtimeApi.PodSandboxConfig {
makeConfig := func(annotations map[string]string) *runtimeapi.PodSandboxConfig {
return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations)
}
tests := []struct {
msg string
config *runtimeApi.PodSandboxConfig
config *runtimeapi.PodSandboxConfig
expectedOpts []string
}{{
msg: "No security annotations",

View File

@ -21,7 +21,7 @@ import (
"strconv"
"strings"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/leaky"
)
@ -55,7 +55,7 @@ const (
DockerPullableImageIDPrefix = dockertools.DockerPullablePrefix
)
func makeSandboxName(s *runtimeApi.PodSandboxConfig) string {
func makeSandboxName(s *runtimeapi.PodSandboxConfig) string {
return strings.Join([]string{
kubePrefix, // 0
sandboxContainerName, // 1
@ -66,7 +66,7 @@ func makeSandboxName(s *runtimeApi.PodSandboxConfig) string {
}, nameDelimiter)
}
func makeContainerName(s *runtimeApi.PodSandboxConfig, c *runtimeApi.ContainerConfig) string {
func makeContainerName(s *runtimeapi.PodSandboxConfig, c *runtimeapi.ContainerConfig) string {
return strings.Join([]string{
kubePrefix, // 0
c.Metadata.GetName(), // 1:
@ -87,7 +87,7 @@ func parseUint32(s string) (uint32, error) {
}
// TODO: Evaluate whether we should rely on labels completely.
func parseSandboxName(name string) (*runtimeApi.PodSandboxMetadata, error) {
func parseSandboxName(name string) (*runtimeapi.PodSandboxMetadata, error) {
// Docker adds a "/" prefix to names, so trim it.
name = strings.TrimPrefix(name, "/")
@ -104,7 +104,7 @@ func parseSandboxName(name string) (*runtimeApi.PodSandboxMetadata, error) {
return nil, fmt.Errorf("failed to parse the sandbox name %q: %v", name, err)
}
return &runtimeApi.PodSandboxMetadata{
return &runtimeapi.PodSandboxMetadata{
Name: &parts[2],
Namespace: &parts[3],
Uid: &parts[4],
@ -113,7 +113,7 @@ func parseSandboxName(name string) (*runtimeApi.PodSandboxMetadata, error) {
}
// TODO: Evaluate whether we should rely on labels completely.
func parseContainerName(name string) (*runtimeApi.ContainerMetadata, error) {
func parseContainerName(name string) (*runtimeapi.ContainerMetadata, error) {
// Docker adds a "/" prefix to names, so trim it.
name = strings.TrimPrefix(name, "/")
@ -130,7 +130,7 @@ func parseContainerName(name string) (*runtimeApi.ContainerMetadata, error) {
return nil, fmt.Errorf("failed to parse the container name %q: %v", name, err)
}
return &runtimeApi.ContainerMetadata{
return &runtimeapi.ContainerMetadata{
Name: &parts[1],
Attempt: &attempt,
}, nil
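
The sandbox name is a positional, delimiter-joined encoding of the pod metadata, and parsing simply splits it back apart. A round-trip sketch, assuming nameDelimiter is "_", kubePrefix is "k8s", and sandboxContainerName is "POD" (those constants are defined outside this diff):

package main

import (
	"fmt"
	"strings"
)

const delimiter = "_" // assumed value of nameDelimiter

// joinSandboxName mirrors makeSandboxName's positional layout:
// prefix_POD_name_namespace_uid_attempt.
func joinSandboxName(name, namespace, uid string, attempt uint32) string {
	return strings.Join([]string{
		"k8s", "POD", name, namespace, uid, fmt.Sprintf("%d", attempt),
	}, delimiter)
}

// splitSandboxName reverses the encoding; parts[2..5] carry the metadata,
// just as parseSandboxName reads them above.
func splitSandboxName(s string) (name, namespace, uid, attempt string, err error) {
	parts := strings.Split(strings.TrimPrefix(s, "/"), delimiter)
	if len(parts) != 6 {
		return "", "", "", "", fmt.Errorf("unexpected sandbox name %q", s)
	}
	return parts[2], parts[3], parts[4], parts[5], nil
}

func main() {
	encoded := joinSandboxName("foo", "bar", "iamuid", 3)
	fmt.Println(encoded) // k8s_POD_foo_bar_iamuid_3
	name, ns, uid, attempt, _ := splitSandboxName(encoded)
	fmt.Println(name, ns, uid, attempt) // foo bar iamuid 3
}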

View File

@ -21,7 +21,7 @@ import (
"github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
func TestSandboxNameRoundTrip(t *testing.T) {
@ -53,8 +53,8 @@ func TestNonParsableSandboxNames(t *testing.T) {
func TestContainerNameRoundTrip(t *testing.T) {
sConfig := makeSandboxConfig("foo", "bar", "iamuid", 3)
name, attempt := "pause", uint32(5)
config := &runtimeApi.ContainerConfig{
Metadata: &runtimeApi.ContainerMetadata{
config := &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
Name: &name,
Attempt: &attempt,
},

View File

@ -25,7 +25,7 @@ import (
"github.com/golang/glog"
"google.golang.org/grpc"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockershim"
"k8s.io/kubernetes/pkg/util/interrupt"
)
@ -69,8 +69,8 @@ func (s *DockerServer) Start() error {
}
// Create the grpc server and register runtime and image services.
s.server = grpc.NewServer()
runtimeApi.RegisterRuntimeServiceServer(s.server, s.service)
runtimeApi.RegisterImageServiceServer(s.server, s.service)
runtimeapi.RegisterRuntimeServiceServer(s.server, s.service)
runtimeapi.RegisterImageServiceServer(s.server, s.service)
go func() {
// Use the interrupt handler to make sure the server is stopped properly.
h := interrupt.New(nil, s.Stop)
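
A condensed sketch of the start-up pattern above, assuming this file's existing imports (net, grpc, and the runtimeapi alias); the endpoint and error handling are illustrative only:

// startSketch is illustrative, not the shim's actual Start: one server
// object receives both generated registrations because the service
// implements both RuntimeServiceServer and ImageServiceServer.
func startSketch(service DockerService) error {
	lis, err := net.Listen("tcp", "127.0.0.1:0") // hypothetical endpoint
	if err != nil {
		return err
	}
	server := grpc.NewServer()
	runtimeapi.RegisterRuntimeServiceServer(server, service)
	runtimeapi.RegisterImageServiceServer(server, service)
	go server.Serve(lis) // Serve blocks, so run it off the caller's goroutine
	return nil
}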

View File

@ -21,16 +21,16 @@ import (
"golang.org/x/net/context"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockershim"
utilexec "k8s.io/kubernetes/pkg/util/exec"
)
// DockerService is the interface that implements the CRI remote service server.
type DockerService interface {
runtimeApi.RuntimeServiceServer
runtimeApi.ImageServiceServer
runtimeapi.RuntimeServiceServer
runtimeapi.ImageServiceServer
}
// dockerService uses dockershim service to implement DockerService.
@ -38,115 +38,115 @@ type DockerService interface {
// TODO(random-liu): Change the dockershim service to support context, and implement
// internal services and remote services with the dockershim service.
type dockerService struct {
runtimeService internalApi.RuntimeService
imageService internalApi.ImageManagerService
runtimeService internalapi.RuntimeService
imageService internalapi.ImageManagerService
}
func NewDockerService(s dockershim.DockerService) DockerService {
return &dockerService{runtimeService: s, imageService: s}
}
func (d *dockerService) Version(ctx context.Context, r *runtimeApi.VersionRequest) (*runtimeApi.VersionResponse, error) {
func (d *dockerService) Version(ctx context.Context, r *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
return d.runtimeService.Version(r.GetVersion())
}
func (d *dockerService) Status(ctx context.Context, r *runtimeApi.StatusRequest) (*runtimeApi.StatusResponse, error) {
func (d *dockerService) Status(ctx context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
status, err := d.runtimeService.Status()
if err != nil {
return nil, err
}
return &runtimeApi.StatusResponse{Status: status}, nil
return &runtimeapi.StatusResponse{Status: status}, nil
}
func (d *dockerService) RunPodSandbox(ctx context.Context, r *runtimeApi.RunPodSandboxRequest) (*runtimeApi.RunPodSandboxResponse, error) {
func (d *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodSandboxRequest) (*runtimeapi.RunPodSandboxResponse, error) {
podSandboxId, err := d.runtimeService.RunPodSandbox(r.GetConfig())
if err != nil {
return nil, err
}
return &runtimeApi.RunPodSandboxResponse{PodSandboxId: &podSandboxId}, nil
return &runtimeapi.RunPodSandboxResponse{PodSandboxId: &podSandboxId}, nil
}
func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeApi.StopPodSandboxRequest) (*runtimeApi.StopPodSandboxResponse, error) {
func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
err := d.runtimeService.StopPodSandbox(r.GetPodSandboxId())
if err != nil {
return nil, err
}
return &runtimeApi.StopPodSandboxResponse{}, nil
return &runtimeapi.StopPodSandboxResponse{}, nil
}
func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeApi.RemovePodSandboxRequest) (*runtimeApi.RemovePodSandboxResponse, error) {
func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
err := d.runtimeService.RemovePodSandbox(r.GetPodSandboxId())
if err != nil {
return nil, err
}
return &runtimeApi.RemovePodSandboxResponse{}, nil
return &runtimeapi.RemovePodSandboxResponse{}, nil
}
func (d *dockerService) PodSandboxStatus(ctx context.Context, r *runtimeApi.PodSandboxStatusRequest) (*runtimeApi.PodSandboxStatusResponse, error) {
func (d *dockerService) PodSandboxStatus(ctx context.Context, r *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
podSandboxStatus, err := d.runtimeService.PodSandboxStatus(r.GetPodSandboxId())
if err != nil {
return nil, err
}
return &runtimeApi.PodSandboxStatusResponse{Status: podSandboxStatus}, nil
return &runtimeapi.PodSandboxStatusResponse{Status: podSandboxStatus}, nil
}
func (d *dockerService) ListPodSandbox(ctx context.Context, r *runtimeApi.ListPodSandboxRequest) (*runtimeApi.ListPodSandboxResponse, error) {
func (d *dockerService) ListPodSandbox(ctx context.Context, r *runtimeapi.ListPodSandboxRequest) (*runtimeapi.ListPodSandboxResponse, error) {
items, err := d.runtimeService.ListPodSandbox(r.GetFilter())
if err != nil {
return nil, err
}
return &runtimeApi.ListPodSandboxResponse{Items: items}, nil
return &runtimeapi.ListPodSandboxResponse{Items: items}, nil
}
func (d *dockerService) CreateContainer(ctx context.Context, r *runtimeApi.CreateContainerRequest) (*runtimeApi.CreateContainerResponse, error) {
func (d *dockerService) CreateContainer(ctx context.Context, r *runtimeapi.CreateContainerRequest) (*runtimeapi.CreateContainerResponse, error) {
containerId, err := d.runtimeService.CreateContainer(r.GetPodSandboxId(), r.GetConfig(), r.GetSandboxConfig())
if err != nil {
return nil, err
}
return &runtimeApi.CreateContainerResponse{ContainerId: &containerId}, nil
return &runtimeapi.CreateContainerResponse{ContainerId: &containerId}, nil
}
func (d *dockerService) StartContainer(ctx context.Context, r *runtimeApi.StartContainerRequest) (*runtimeApi.StartContainerResponse, error) {
func (d *dockerService) StartContainer(ctx context.Context, r *runtimeapi.StartContainerRequest) (*runtimeapi.StartContainerResponse, error) {
err := d.runtimeService.StartContainer(r.GetContainerId())
if err != nil {
return nil, err
}
return &runtimeApi.StartContainerResponse{}, nil
return &runtimeapi.StartContainerResponse{}, nil
}
func (d *dockerService) StopContainer(ctx context.Context, r *runtimeApi.StopContainerRequest) (*runtimeApi.StopContainerResponse, error) {
func (d *dockerService) StopContainer(ctx context.Context, r *runtimeapi.StopContainerRequest) (*runtimeapi.StopContainerResponse, error) {
err := d.runtimeService.StopContainer(r.GetContainerId(), r.GetTimeout())
if err != nil {
return nil, err
}
return &runtimeApi.StopContainerResponse{}, nil
return &runtimeapi.StopContainerResponse{}, nil
}
func (d *dockerService) RemoveContainer(ctx context.Context, r *runtimeApi.RemoveContainerRequest) (*runtimeApi.RemoveContainerResponse, error) {
func (d *dockerService) RemoveContainer(ctx context.Context, r *runtimeapi.RemoveContainerRequest) (*runtimeapi.RemoveContainerResponse, error) {
err := d.runtimeService.RemoveContainer(r.GetContainerId())
if err != nil {
return nil, err
}
return &runtimeApi.RemoveContainerResponse{}, nil
return &runtimeapi.RemoveContainerResponse{}, nil
}
func (d *dockerService) ListContainers(ctx context.Context, r *runtimeApi.ListContainersRequest) (*runtimeApi.ListContainersResponse, error) {
func (d *dockerService) ListContainers(ctx context.Context, r *runtimeapi.ListContainersRequest) (*runtimeapi.ListContainersResponse, error) {
containers, err := d.runtimeService.ListContainers(r.GetFilter())
if err != nil {
return nil, err
}
return &runtimeApi.ListContainersResponse{Containers: containers}, nil
return &runtimeapi.ListContainersResponse{Containers: containers}, nil
}
func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeApi.ContainerStatusRequest) (*runtimeApi.ContainerStatusResponse, error) {
func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeapi.ContainerStatusRequest) (*runtimeapi.ContainerStatusResponse, error) {
status, err := d.runtimeService.ContainerStatus(r.GetContainerId())
if err != nil {
return nil, err
}
return &runtimeApi.ContainerStatusResponse{Status: status}, nil
return &runtimeapi.ContainerStatusResponse{Status: status}, nil
}
func (d *dockerService) ExecSync(ctx context.Context, r *runtimeApi.ExecSyncRequest) (*runtimeApi.ExecSyncResponse, error) {
func (d *dockerService) ExecSync(ctx context.Context, r *runtimeapi.ExecSyncRequest) (*runtimeapi.ExecSyncResponse, error) {
stdout, stderr, err := d.runtimeService.ExecSync(r.GetContainerId(), r.GetCmd(), time.Duration(r.GetTimeout())*time.Second)
var exitCode int32
if err != nil {
@ -156,61 +156,61 @@ func (d *dockerService) ExecSync(ctx context.Context, r *runtimeApi.ExecSyncRequ
}
exitCode = int32(exitError.ExitStatus())
}
return &runtimeApi.ExecSyncResponse{
return &runtimeapi.ExecSyncResponse{
Stdout: stdout,
Stderr: stderr,
ExitCode: &exitCode,
}, nil
}
func (d *dockerService) Exec(ctx context.Context, r *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) {
func (d *dockerService) Exec(ctx context.Context, r *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
return d.runtimeService.Exec(r)
}
func (d *dockerService) Attach(ctx context.Context, r *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) {
func (d *dockerService) Attach(ctx context.Context, r *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
return d.runtimeService.Attach(r)
}
func (d *dockerService) PortForward(ctx context.Context, r *runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) {
func (d *dockerService) PortForward(ctx context.Context, r *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
return d.runtimeService.PortForward(r)
}
func (d *dockerService) UpdateRuntimeConfig(ctx context.Context, r *runtimeApi.UpdateRuntimeConfigRequest) (*runtimeApi.UpdateRuntimeConfigResponse, error) {
func (d *dockerService) UpdateRuntimeConfig(ctx context.Context, r *runtimeapi.UpdateRuntimeConfigRequest) (*runtimeapi.UpdateRuntimeConfigResponse, error) {
err := d.runtimeService.UpdateRuntimeConfig(r.GetRuntimeConfig())
if err != nil {
return nil, err
}
return &runtimeApi.UpdateRuntimeConfigResponse{}, nil
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
}
func (d *dockerService) ListImages(ctx context.Context, r *runtimeApi.ListImagesRequest) (*runtimeApi.ListImagesResponse, error) {
func (d *dockerService) ListImages(ctx context.Context, r *runtimeapi.ListImagesRequest) (*runtimeapi.ListImagesResponse, error) {
images, err := d.imageService.ListImages(r.GetFilter())
if err != nil {
return nil, err
}
return &runtimeApi.ListImagesResponse{Images: images}, nil
return &runtimeapi.ListImagesResponse{Images: images}, nil
}
func (d *dockerService) ImageStatus(ctx context.Context, r *runtimeApi.ImageStatusRequest) (*runtimeApi.ImageStatusResponse, error) {
func (d *dockerService) ImageStatus(ctx context.Context, r *runtimeapi.ImageStatusRequest) (*runtimeapi.ImageStatusResponse, error) {
image, err := d.imageService.ImageStatus(r.GetImage())
if err != nil {
return nil, err
}
return &runtimeApi.ImageStatusResponse{Image: image}, nil
return &runtimeapi.ImageStatusResponse{Image: image}, nil
}
func (d *dockerService) PullImage(ctx context.Context, r *runtimeApi.PullImageRequest) (*runtimeApi.PullImageResponse, error) {
func (d *dockerService) PullImage(ctx context.Context, r *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) {
err := d.imageService.PullImage(r.GetImage(), r.GetAuth())
if err != nil {
return nil, err
}
return &runtimeApi.PullImageResponse{}, nil
return &runtimeapi.PullImageResponse{}, nil
}
func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeApi.RemoveImageRequest) (*runtimeApi.RemoveImageResponse, error) {
func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
err := d.imageService.RemoveImage(r.GetImage())
if err != nil {
return nil, err
}
return &runtimeApi.RemoveImageResponse{}, nil
return &runtimeapi.RemoveImageResponse{}, nil
}

View File

@ -41,7 +41,7 @@ import (
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
@ -256,7 +256,7 @@ func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps
return cfg, nil
}
func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (internalApi.RuntimeService, internalApi.ImageManagerService, error) {
func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (internalapi.RuntimeService, internalapi.ImageManagerService, error) {
rs, err := remote.NewRemoteRuntimeService(config.RemoteRuntimeEndpoint, config.RuntimeRequestTimeout.Duration)
if err != nil {
return nil, nil, err
@ -529,8 +529,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
// becomes the default.
klet.networkPlugin = nil
var runtimeService internalApi.RuntimeService
var imageService internalApi.ImageManagerService
var runtimeService internalapi.RuntimeService
var imageService internalapi.ImageManagerService
var err error
switch kubeCfg.ContainerRuntime {
@ -548,8 +548,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
}
klet.criHandler = ds
rs := ds.(internalApi.RuntimeService)
is := ds.(internalApi.ImageManagerService)
rs := ds.(internalapi.RuntimeService)
is := ds.(internalapi.ImageManagerService)
// This is an internal knob to switch between grpc and non-grpc
// integration.
// TODO: Remove this knob once we switch to using GRPC completely.

View File

@ -20,7 +20,7 @@ import (
"fmt"
"math"
"net"
goRuntime "runtime"
goruntime "runtime"
"sort"
"strings"
"time"
@ -177,8 +177,8 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
Name: string(kl.nodeName),
Labels: map[string]string{
unversioned.LabelHostname: kl.hostname,
unversioned.LabelOS: goRuntime.GOOS,
unversioned.LabelArch: goRuntime.GOARCH,
unversioned.LabelOS: goruntime.GOOS,
unversioned.LabelArch: goruntime.GOARCH,
},
},
Spec: v1.NodeSpec{
@ -572,8 +572,8 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
// Set the GOOS and GOARCH for this node
func (kl *Kubelet) setNodeStatusGoRuntime(node *v1.Node) {
node.Status.NodeInfo.OperatingSystem = goRuntime.GOOS
node.Status.NodeInfo.Architecture = goRuntime.GOARCH
node.Status.NodeInfo.OperatingSystem = goruntime.GOOS
node.Status.NodeInfo.Architecture = goruntime.GOARCH
}
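
The goruntime alias is needed because the standard library package is literally named runtime, which would otherwise collide with the kubelet's own runtime-related identifiers. A standalone illustration:

package main

import (
	"fmt"
	goruntime "runtime" // aliased so a separate "runtime" identifier can coexist
)

func main() {
	// Same fields the kubelet stamps onto the Node above.
	fmt.Println(goruntime.GOOS, goruntime.GOARCH)
}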
// Set status for the node.

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@ -91,7 +91,7 @@ func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
return pod, found
}
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
recorder := &record.FakeRecorder{}
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,

View File

@ -23,7 +23,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
)
@ -57,7 +57,7 @@ func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
// Newest first.
type podSandboxByCreated []*runtimeApi.PodSandbox
type podSandboxByCreated []*runtimeapi.PodSandbox
func (p podSandboxByCreated) Len() int { return len(p) }
func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
@ -69,37 +69,37 @@ func (c containerStatusByCreated) Len() int { return len(c) }
func (c containerStatusByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) }
// toKubeContainerState converts runtimeApi.ContainerState to kubecontainer.ContainerState.
func toKubeContainerState(state runtimeApi.ContainerState) kubecontainer.ContainerState {
// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.ContainerState.
func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.ContainerState {
switch state {
case runtimeApi.ContainerState_CONTAINER_CREATED:
case runtimeapi.ContainerState_CONTAINER_CREATED:
return kubecontainer.ContainerStateCreated
case runtimeApi.ContainerState_CONTAINER_RUNNING:
case runtimeapi.ContainerState_CONTAINER_RUNNING:
return kubecontainer.ContainerStateRunning
case runtimeApi.ContainerState_CONTAINER_EXITED:
case runtimeapi.ContainerState_CONTAINER_EXITED:
return kubecontainer.ContainerStateExited
case runtimeApi.ContainerState_CONTAINER_UNKNOWN:
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
return kubecontainer.ContainerStateUnknown
}
return kubecontainer.ContainerStateUnknown
}
// toRuntimeProtocol converts v1.Protocol to runtimeApi.Protocol.
func toRuntimeProtocol(protocol v1.Protocol) runtimeApi.Protocol {
// toRuntimeProtocol converts v1.Protocol to runtimeapi.Protocol.
func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {
switch protocol {
case v1.ProtocolTCP:
return runtimeApi.Protocol_TCP
return runtimeapi.Protocol_TCP
case v1.ProtocolUDP:
return runtimeApi.Protocol_UDP
return runtimeapi.Protocol_UDP
}
glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
return runtimeApi.Protocol_TCP
return runtimeapi.Protocol_TCP
}
// toKubeContainer converts runtimeApi.Container to kubecontainer.Container.
func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeApi.Container) (*kubecontainer.Container, error) {
// toKubeContainer converts runtimeapi.Container to kubecontainer.Container.
func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*kubecontainer.Container, error) {
if c == nil || c.Id == nil || c.Image == nil || c.State == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}
@ -115,11 +115,11 @@ func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeApi.Container) (*k
}, nil
}
// sandboxToKubeContainer converts runtimeApi.PodSandbox to kubecontainer.Container.
// sandboxToKubeContainer converts runtimeapi.PodSandbox to kubecontainer.Container.
// This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete.
func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeApi.PodSandbox) (*kubecontainer.Container, error) {
func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSandbox) (*kubecontainer.Container, error) {
if s == nil || s.Id == nil || s.State == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}
@ -149,7 +149,7 @@ func getContainerSpec(pod *v1.Pod, containerName string) *v1.Container {
// getImageUser gets uid or user name that will run the command(s) from image. The function
// guarantees that only one of them is set.
func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, *string, error) {
imageStatus, err := m.imageService.ImageStatus(&runtimeApi.ImageSpec{Image: &image})
imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image})
if err != nil {
return nil, nil, err
}
@ -237,8 +237,8 @@ func buildPodLogsDirectory(podUID types.UID) string {
return filepath.Join(podLogsRootDirectory, string(podUID))
}
// toKubeRuntimeStatus converts the runtimeApi.RuntimeStatus to kubecontainer.RuntimeStatus.
func toKubeRuntimeStatus(status *runtimeApi.RuntimeStatus) *kubecontainer.RuntimeStatus {
// toKubeRuntimeStatus converts the runtimeapi.RuntimeStatus to kubecontainer.RuntimeStatus.
func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.RuntimeStatus {
conditions := []kubecontainer.RuntimeCondition{}
for _, c := range status.GetConditions() {
conditions = append(conditions, kubecontainer.RuntimeCondition{

View File

@ -19,30 +19,30 @@ package kuberuntime
import (
"time"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
// instrumentedRuntimeService wraps the RuntimeService and records operation
// and error metrics.
type instrumentedRuntimeService struct {
service internalApi.RuntimeService
service internalapi.RuntimeService
}
// Creates an instrumented RuntimeService from an existing RuntimeService.
func NewInstrumentedRuntimeService(service internalApi.RuntimeService) internalApi.RuntimeService {
func NewInstrumentedRuntimeService(service internalapi.RuntimeService) internalapi.RuntimeService {
return &instrumentedRuntimeService{service: service}
}
// instrumentedImageManagerService wraps the ImageManagerService and records
// operation and error metrics.
type instrumentedImageManagerService struct {
service internalApi.ImageManagerService
service internalapi.ImageManagerService
}
// Creates an instrumented ImageManagerService from an existing ImageManagerService.
func NewInstrumentedImageManagerService(service internalApi.ImageManagerService) internalApi.ImageManagerService {
func NewInstrumentedImageManagerService(service internalapi.ImageManagerService) internalapi.ImageManagerService {
return &instrumentedImageManagerService{service: service}
}
@ -59,7 +59,7 @@ func recordError(operation string, err error) {
}
}
func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResponse, error) {
func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
const operation = "version"
defer recordOperation(operation, time.Now())
@ -68,7 +68,7 @@ func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeApi.Ver
return out, err
}
func (in instrumentedRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) {
func (in instrumentedRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
const operation = "status"
defer recordOperation(operation, time.Now())
@ -77,7 +77,7 @@ func (in instrumentedRuntimeService) Status() (*runtimeApi.RuntimeStatus, error)
return out, err
}
func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) {
func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "create_container"
defer recordOperation(operation, time.Now())
@ -113,7 +113,7 @@ func (in instrumentedRuntimeService) RemoveContainer(containerID string) error {
return err
}
func (in instrumentedRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
const operation = "list_containers"
defer recordOperation(operation, time.Now())
@ -122,7 +122,7 @@ func (in instrumentedRuntimeService) ListContainers(filter *runtimeApi.Container
return out, err
}
func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) {
func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
const operation = "container_status"
defer recordOperation(operation, time.Now())
@ -140,7 +140,7 @@ func (in instrumentedRuntimeService) ExecSync(containerID string, cmd []string,
return stdout, stderr, err
}
func (in instrumentedRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) {
func (in instrumentedRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
const operation = "exec"
defer recordOperation(operation, time.Now())
@ -149,7 +149,7 @@ func (in instrumentedRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtime
return resp, err
}
func (in instrumentedRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) {
func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
const operation = "attach"
defer recordOperation(operation, time.Now())
@ -158,7 +158,7 @@ func (in instrumentedRuntimeService) Attach(req *runtimeApi.AttachRequest) (*run
return resp, err
}
func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) {
func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "run_podsandbox"
defer recordOperation(operation, time.Now())
@ -185,7 +185,7 @@ func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error
return err
}
func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error) {
func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
const operation = "podsandbox_status"
defer recordOperation(operation, time.Now())
@ -194,7 +194,7 @@ func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*run
return out, err
}
func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) {
func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
const operation = "list_podsandbox"
defer recordOperation(operation, time.Now())
@ -203,7 +203,7 @@ func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandbo
return out, err
}
func (in instrumentedRuntimeService) PortForward(req *runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) {
func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
const operation = "port_forward"
defer recordOperation(operation, time.Now())
@ -212,7 +212,7 @@ func (in instrumentedRuntimeService) PortForward(req *runtimeApi.PortForwardRequ
return resp, err
}
func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) error {
func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
const operation = "update_runtime_config"
defer recordOperation(operation, time.Now())
@ -221,7 +221,7 @@ func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeA
return err
}
func (in instrumentedImageManagerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) {
func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
const operation = "list_images"
defer recordOperation(operation, time.Now())
@ -230,7 +230,7 @@ func (in instrumentedImageManagerService) ListImages(filter *runtimeApi.ImageFil
return out, err
}
func (in instrumentedImageManagerService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) {
func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
const operation = "image_status"
defer recordOperation(operation, time.Now())
@ -239,7 +239,7 @@ func (in instrumentedImageManagerService) ImageStatus(image *runtimeApi.ImageSpe
return out, err
}
func (in instrumentedImageManagerService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error {
func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
const operation = "pull_image"
defer recordOperation(operation, time.Now())
@ -248,7 +248,7 @@ func (in instrumentedImageManagerService) PullImage(image *runtimeApi.ImageSpec,
return err
}
func (in instrumentedImageManagerService) RemoveImage(image *runtimeApi.ImageSpec) error {
func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpec) error {
const operation = "remove_image"
defer recordOperation(operation, time.Now())
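
Each wrapper above leans on a Go detail: a deferred call's arguments are evaluated when the defer statement runs, so time.Now() captures the start time while recordOperation itself fires at return. A self-contained sketch with the metrics sink reduced to a print:

package main

import (
	"fmt"
	"time"
)

// recordOperation is a stand-in for the metrics helper above: start was
// captured when the defer statement executed, not when the function returned.
func recordOperation(operation string, start time.Time) {
	fmt.Printf("%s took %v\n", operation, time.Since(start))
}

func instrumentedCall() {
	const operation = "remove_image"
	defer recordOperation(operation, time.Now())
	time.Sleep(10 * time.Millisecond) // the wrapped work
}

func main() {
	instrumentedCall()
}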

View File

@ -31,7 +31,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/qos"
@ -49,7 +49,7 @@ import (
// * create the container
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeApi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) {
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) {
// Step 1: pull the image.
err, msg := m.imagePuller.EnsureImageExists(pod, container, pullSecrets)
if err != nil {
@ -129,7 +129,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
}
// generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP string) (*runtimeApi.ContainerConfig, error) {
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP string) (*runtimeapi.ContainerConfig, error) {
opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
if err != nil {
return nil, err
@ -151,12 +151,12 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
restartCountUint32 := uint32(restartCount)
config := &runtimeApi.ContainerConfig{
Metadata: &runtimeApi.ContainerMetadata{
config := &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
Name: &container.Name,
Attempt: &restartCountUint32,
},
Image: &runtimeApi.ImageSpec{Image: &container.Image},
Image: &runtimeapi.ImageSpec{Image: &container.Image},
Command: command,
Args: args,
WorkingDir: &container.WorkingDir,
@ -172,10 +172,10 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
}
// set environment variables
envs := make([]*runtimeApi.KeyValue, len(opts.Envs))
envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
for idx := range opts.Envs {
e := opts.Envs[idx]
envs[idx] = &runtimeApi.KeyValue{
envs[idx] = &runtimeapi.KeyValue{
Key: &e.Name,
Value: &e.Value,
}
@ -186,9 +186,9 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
}
// generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeApi.LinuxContainerConfig {
lc := &runtimeApi.LinuxContainerConfig{
Resources: &runtimeApi.LinuxContainerResources{},
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeapi.LinuxContainerConfig {
lc := &runtimeapi.LinuxContainerConfig{
Resources: &runtimeapi.LinuxContainerResources{},
SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
}
@ -229,12 +229,12 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
}
// makeDevices generates container devices for kubelet runtime v1.
func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device {
devices := make([]*runtimeApi.Device, len(opts.Devices))
func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.Device {
devices := make([]*runtimeapi.Device, len(opts.Devices))
for idx := range opts.Devices {
device := opts.Devices[idx]
devices[idx] = &runtimeApi.Device{
devices[idx] = &runtimeapi.Device{
HostPath: &device.PathOnHost,
ContainerPath: &device.PathInContainer,
Permissions: &device.Permissions,
@ -245,13 +245,13 @@ func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device {
}
// makeMounts generates container volume mounts for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeApi.Mount {
volumeMounts := []*runtimeApi.Mount{}
func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeapi.Mount {
volumeMounts := []*runtimeapi.Mount{}
for idx := range opts.Mounts {
v := opts.Mounts[idx]
selinuxRelabel := v.SELinuxRelabel && selinux.SELinuxEnabled()
mount := &runtimeApi.Mount{
mount := &runtimeapi.Mount{
HostPath: &v.HostPath,
ContainerPath: &v.ContainerPath,
Readonly: &v.ReadOnly,
@ -276,7 +276,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
} else {
fs.Close()
selinuxRelabel := selinux.SELinuxEnabled()
volumeMounts = append(volumeMounts, &runtimeApi.Mount{
volumeMounts = append(volumeMounts, &runtimeapi.Mount{
HostPath: &containerLogPath,
ContainerPath: &container.TerminationMessagePath,
SelinuxRelabel: &selinuxRelabel,
@ -290,12 +290,12 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
// getKubeletContainers lists containers managed by kubelet.
// The boolean parameter specifies whether to return all containers, including
// those that have already exited or are dead (used for garbage collection).
func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeApi.Container, error) {
filter := &runtimeApi.ContainerFilter{
func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeapi.Container, error) {
filter := &runtimeapi.ContainerFilter{
LabelSelector: map[string]string{kubernetesManagedLabel: "true"},
}
if !allContainers {
runningState := runtimeApi.ContainerState_CONTAINER_RUNNING
runningState := runtimeapi.ContainerState_CONTAINER_RUNNING
filter.State = &runningState
}
@ -309,7 +309,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]
}
// getContainersHelper lists containers by filter.
func (m *kubeGenericRuntimeManager) getContainersHelper(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
func (m *kubeGenericRuntimeManager) getContainersHelper(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
resp, err := m.runtimeService.ListContainers(filter)
if err != nil {
return nil, err
@ -324,7 +324,7 @@ func makeUID() string {
}
// getTerminationMessage gets the termination message of the container.
func getTerminationMessage(status *runtimeApi.ContainerStatus, kubeStatus *kubecontainer.ContainerStatus, terminationMessagePath string) string {
func getTerminationMessage(status *runtimeapi.ContainerStatus, kubeStatus *kubecontainer.ContainerStatus, terminationMessagePath string) string {
message := ""
if !kubeStatus.FinishedAt.IsZero() || kubeStatus.ExitCode != 0 {
@ -351,7 +351,7 @@ func getTerminationMessage(status *runtimeApi.ContainerStatus, kubeStatus *kubec
// getPodContainerStatuses gets all containers' statuses for the pod.
func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.ContainerStatus, error) {
// Select all containers of the given pod.
containers, err := m.runtimeService.ListContainers(&runtimeApi.ContainerFilter{
containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)},
})
if err != nil {
@ -384,7 +384,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
CreatedAt: time.Unix(0, status.GetCreatedAt()),
}
if c.GetState() == runtimeApi.ContainerState_CONTAINER_RUNNING {
if c.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
cStatus.StartedAt = time.Unix(0, status.GetStartedAt())
} else {
cStatus.Reason = status.GetReason()
@ -669,7 +669,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku
// GetExec gets the endpoint the runtime will serve the exec request from.
func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeApi.ExecRequest{
req := &runtimeapi.ExecRequest{
ContainerId: &id.ID,
Cmd: cmd,
Tty: &tty,
@ -685,7 +685,7 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []
// GetAttach gets the endpoint the runtime will serve the attach request from.
func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr bool) (*url.URL, error) {
req := &runtimeApi.AttachRequest{
req := &runtimeapi.AttachRequest{
ContainerId: &id.ID,
Stdin: &stdin,
}

View File

@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
@ -60,7 +60,7 @@ func TestRemoveContainer(t *testing.T) {
assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
// Verify container is removed
fakeRuntime.AssertCalls([]string{"RemoveContainer"})
containers, err := fakeRuntime.ListContainers(&runtimeApi.ContainerFilter{Id: &containerId})
containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: &containerId})
assert.NoError(t, err)
assert.Empty(t, containers)
}
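
As a side note on the filter construction above: the generated CRI v1alpha1 types use pointer fields throughout (Id: &containerId), so "unset" stays distinguishable from the zero value and callers take addresses of locals. A self-contained sketch of that pattern, using stand-in types rather than the real runtimeapi structs:

package main

import "fmt"

// Stand-ins for the generated runtimeapi filter types; the real ones
// are protobuf-generated and likewise use pointers for optional fields.
type ContainerState int32

type ContainerFilter struct {
	Id    *string
	State *ContainerState
}

func main() {
	id := "abc123"
	running := ContainerState(1) // stands in for CONTAINER_RUNNING
	filter := &ContainerFilter{Id: &id, State: &running}

	// A nil field means "match anything" for that dimension.
	fmt.Println(*filter.Id, *filter.State, filter.State != nil)
}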

View File

@ -24,8 +24,8 @@ import (
"time"
"github.com/golang/glog"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
)
@ -46,13 +46,13 @@ const sandboxMinGCAge time.Duration = 30 * time.Second
// containerGC is the manager of garbage collection.
type containerGC struct {
client internalApi.RuntimeService
client internalapi.RuntimeService
manager *kubeGenericRuntimeManager
podGetter podGetter
}
// NewContainerGC creates a new containerGC.
func NewContainerGC(client internalApi.RuntimeService, podGetter podGetter, manager *kubeGenericRuntimeManager) *containerGC {
func NewContainerGC(client internalapi.RuntimeService, podGetter podGetter, manager *kubeGenericRuntimeManager) *containerGC {
return &containerGC{
client: client,
manager: manager,
@ -161,7 +161,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
newestGCTime := time.Now().Add(-minAge)
for _, container := range containers {
// Prune out running containers.
if container.GetState() == runtimeApi.ContainerState_CONTAINER_RUNNING {
if container.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
continue
}
@ -256,7 +256,7 @@ func (cgc *containerGC) evictSandboxes(minAge time.Duration) error {
newestGCTime := time.Now().Add(-minAge)
for _, sandbox := range sandboxes {
// Prune out ready sandboxes.
if sandbox.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY {
if sandbox.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
continue
}
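
Both pruning loops above apply the same rule: skip anything still running (or ready) and anything created after now minus minAge. A compact stand-alone sketch of that cutoff, with hypothetical types rather than the real containerGC:

package main

import (
	"fmt"
	"time"
)

type candidate struct {
	name      string
	createdAt time.Time
	running   bool
}

// evictable mirrors the GC rule above: prune out running candidates
// and anything newer than the newestGCTime cutoff.
func evictable(all []candidate, minAge time.Duration) []candidate {
	newestGCTime := time.Now().Add(-minAge)
	var out []candidate
	for _, c := range all {
		if c.running || c.createdAt.After(newestGCTime) {
			continue
		}
		out = append(out, c)
	}
	return out
}

func main() {
	cs := []candidate{
		{"old-exited", time.Now().Add(-2 * time.Hour), false},
		{"fresh-exited", time.Now(), false},
		{"running", time.Now().Add(-2 * time.Hour), true},
	}
	fmt.Println(evictable(cs, time.Hour)) // keeps only old-exited
}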

View File

@ -25,7 +25,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
@ -54,7 +54,7 @@ func TestSandboxGC(t *testing.T) {
{
description: "sandbox with no containers should be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{},
remain: []int{},
@ -62,7 +62,7 @@ func TestSandboxGC(t *testing.T) {
{
description: "running sandbox should not be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeApi.PodSandboxState_SANDBOX_READY},
{pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_READY},
},
containers: []containerTemplate{},
remain: []int{0},
@ -70,18 +70,18 @@ func TestSandboxGC(t *testing.T) {
{
description: "sandbox with containers should not be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{
{pod: pods[0], container: &pods[0].Spec.Containers[0], state: runtimeApi.ContainerState_CONTAINER_EXITED},
{pod: pods[0], container: &pods[0].Spec.Containers[0], state: runtimeapi.ContainerState_CONTAINER_EXITED},
},
remain: []int{0},
},
{
description: "sandbox within min age should not be garbage collected.",
sandboxes: []sandboxTemplate{
{pod: pods[0], createdAt: time.Now().UnixNano(), state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[1], createdAt: time.Now().Add(-2 * time.Hour).UnixNano(), state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[0], createdAt: time.Now().UnixNano(), state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[1], createdAt: time.Now().Add(-2 * time.Hour).UnixNano(), state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{},
minAge: time.Hour, // assume the test won't take an hour
@ -91,14 +91,14 @@ func TestSandboxGC(t *testing.T) {
description: "multiple sandboxes should be handled properly.",
sandboxes: []sandboxTemplate{
// running sandbox.
{pod: pods[0], attempt: 1, state: runtimeApi.PodSandboxState_SANDBOX_READY},
{pod: pods[0], attempt: 1, state: runtimeapi.PodSandboxState_SANDBOX_READY},
// exited sandbox with containers.
{pod: pods[1], attempt: 1, state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[1], attempt: 1, state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
// exited sandbox without containers.
{pod: pods[1], attempt: 0, state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[1], attempt: 0, state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
},
containers: []containerTemplate{
{pod: pods[1], container: &pods[1].Spec.Containers[0], sandboxAttempt: 1, state: runtimeApi.ContainerState_CONTAINER_EXITED},
{pod: pods[1], container: &pods[1].Spec.Containers[0], sandboxAttempt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
},
remain: []int{0, 1},
},
@ -127,7 +127,7 @@ func TestContainerGC(t *testing.T) {
assert.NoError(t, err)
fakePodGetter := m.containerGC.podGetter.(*fakePodGetter)
makeGCContainer := func(podName, containerName string, attempt int, createdAt int64, state runtimeApi.ContainerState) containerTemplate {
makeGCContainer := func(podName, containerName string, attempt int, createdAt int64, state runtimeapi.ContainerState) containerTemplate {
container := makeTestContainer(containerName, "test-image")
pod := makeTestPod(podName, "test-ns", podName, []v1.Container{container})
if podName != "deleted" {
@ -153,7 +153,7 @@ func TestContainerGC(t *testing.T) {
{
description: "all containers should be removed when max container limit is 0",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
remain: []int{},
@ -161,11 +161,11 @@ func TestContainerGC(t *testing.T) {
{
description: "max containers should be complied when no max per pod container limit is set",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 4, 4, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 3, 3, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 4, 4, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 3, 3, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
remain: []int{0, 1, 2, 3},
@ -173,9 +173,9 @@ func TestContainerGC(t *testing.T) {
{
description: "no containers should be removed if both max container and per pod container limits are not set",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
remain: []int{0, 1, 2},
@ -183,94 +183,94 @@ func TestContainerGC(t *testing.T) {
{
description: "recently started containers should not be removed",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1, 2},
},
{
description: "oldest containers should be removed when per pod container limit exceeded",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1},
},
{
description: "running containers should not be removed",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_RUNNING),
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_RUNNING),
},
remain: []int{0, 1, 2},
},
{
description: "no containers should be removed when limits are not exceeded",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1},
},
{
description: "max container count should apply per (UID, container) pair",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1, 3, 4, 6, 7},
},
{
description: "max limit should apply and try to keep from every pod",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 2, 4, 6, 8},
},
{
description: "oldest pods should be removed if limit exceeded",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo5", "bar5", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo6", "bar6", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo7", "bar7", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo5", "bar5", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo6", "bar6", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo7", "bar7", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 2, 4, 6, 8, 9},
},
{
description: "containers for deleted pods should be removed",
containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
// deleted pods still respect MinAge.
makeGCContainer("deleted", "bar1", 2, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
},
remain: []int{0, 1, 2},
},

View File

@ -20,7 +20,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/parsers"
@ -40,7 +40,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
return err
}
imgSpec := &runtimeApi.ImageSpec{Image: &img}
imgSpec := &runtimeapi.ImageSpec{Image: &img}
creds, withCredentials := keyring.Lookup(repoToPull)
if !withCredentials {
glog.V(3).Infof("Pulling image %q without credentials", img)
@ -57,7 +57,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
var pullErrs []error
for _, currentCreds := range creds {
authConfig := credentialprovider.LazyProvide(currentCreds)
auth := &runtimeApi.AuthConfig{
auth := &runtimeapi.AuthConfig{
Username: &authConfig.Username,
Password: &authConfig.Password,
Auth: &authConfig.Auth,
@ -80,7 +80,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
// IsImagePresent checks whether the container image is already in the local storage.
func (m *kubeGenericRuntimeManager) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
status, err := m.imageService.ImageStatus(&runtimeApi.ImageSpec{Image: &image.Image})
status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image.Image})
if err != nil {
glog.Errorf("ImageStatus for image %q failed: %v", image, err)
return false, err
@ -112,7 +112,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)
// RemoveImage removes the specified image.
func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error {
err := m.imageService.RemoveImage(&runtimeApi.ImageSpec{Image: &image.Image})
err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: &image.Image})
if err != nil {
glog.Errorf("Remove image %q failed: %v", image.Image, err)
return err
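
PullImage above walks the matching credentials and returns on the first success, only surfacing an error once every credential has failed. A minimal sketch of that loop; errors.Join (Go 1.20+) stands in here for the utilerrors aggregate used in the real code:

package main

import (
	"errors"
	"fmt"
)

// pullWithCreds tries each credential in turn; the first success wins,
// otherwise all per-credential errors are aggregated into one.
func pullWithCreds(creds []string, pull func(cred string) error) error {
	var pullErrs []error
	for _, c := range creds {
		if err := pull(c); err == nil {
			return nil
		} else {
			pullErrs = append(pullErrs, err)
		}
	}
	return errors.Join(pullErrs...)
}

func main() {
	err := pullWithCreds([]string{"a", "b"}, func(cred string) error {
		if cred == "b" {
			return nil
		}
		return fmt.Errorf("auth failed for %s", cred)
	})
	fmt.Println(err) // <nil>: the second credential succeeded
}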

View File

@ -29,8 +29,8 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/images"
@ -101,8 +101,8 @@ type kubeGenericRuntimeManager struct {
imagePuller images.ImageManager
// gRPC service clients
runtimeService internalApi.RuntimeService
imageService internalApi.ImageManagerService
runtimeService internalapi.RuntimeService
imageService internalapi.ImageManagerService
// The version cache of runtime daemon.
versionCache *cache.ObjectCache
@ -130,8 +130,8 @@ func NewKubeGenericRuntimeManager(
imagePullQPS float32,
imagePullBurst int,
cpuCFSQuota bool,
runtimeService internalApi.RuntimeService,
imageService internalApi.ImageManagerService,
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
) (KubeGenericRuntime, error) {
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
@ -231,7 +231,7 @@ func (r runtimeVersion) Compare(other string) (int, error) {
return 0, nil
}
func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeApi.VersionResponse, error) {
func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) {
typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
if err != nil {
glog.Errorf("Get remote runtime typed version failed: %v", err)
@ -259,7 +259,7 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error)
if err != nil {
return nil, err
}
typedVersion := versionObject.(*runtimeApi.VersionResponse)
typedVersion := versionObject.(*runtimeapi.VersionResponse)
return newRuntimeVersion(typedVersion.GetRuntimeApiVersion())
}
@ -396,14 +396,14 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku
readySandboxCount := 0
for _, s := range podStatus.SandboxStatuses {
if s.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY {
if s.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
readySandboxCount++
}
}
// A new sandbox needs to be created when readySandboxCount > 1 or the ready sandbox is not the latest one.
sandboxStatus := podStatus.SandboxStatuses[0]
if readySandboxCount > 1 || sandboxStatus.GetState() != runtimeApi.PodSandboxState_SANDBOX_READY {
if readySandboxCount > 1 || sandboxStatus.GetState() != runtimeapi.PodSandboxState_SANDBOX_READY {
glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
return true, sandboxStatus.Metadata.GetAttempt() + 1, sandboxStatus.GetId()
}
@ -857,7 +857,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
})
glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)
sandboxStatuses := make([]*runtimeApi.PodSandboxStatus, len(podSandboxIDs))
sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs))
podIP := ""
for idx, podSandboxID := range podSandboxIDs {
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
@ -868,7 +868,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
sandboxStatuses[idx] = podSandboxStatus
// Only get pod IP from latest sandbox
if idx == 0 && podSandboxStatus.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY {
if idx == 0 && podSandboxStatus.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus)
}
}
@ -922,8 +922,8 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
// field of the config?
glog.Infof("updating runtime config through cri with podcidr %v", podCIDR)
return m.runtimeService.UpdateRuntimeConfig(
&runtimeApi.RuntimeConfig{
NetworkConfig: &runtimeApi.NetworkConfig{
&runtimeapi.RuntimeConfig{
NetworkConfig: &runtimeapi.NetworkConfig{
PodCidr: &podCIDR,
},
})

View File

@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
apitest "k8s.io/kubernetes/pkg/kubelet/api/testing"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/network"
@ -66,7 +66,7 @@ type sandboxTemplate struct {
pod *v1.Pod
attempt uint32
createdAt int64
state runtimeApi.PodSandboxState
state runtimeapi.PodSandboxState
}
// containerTemplate is a container template to create fake container.
@ -76,7 +76,7 @@ type containerTemplate struct {
sandboxAttempt uint32
attempt int
createdAt int64
state runtimeApi.ContainerState
state runtimeapi.ContainerState
}
// makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and
@ -86,7 +86,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *
sandbox := makeFakePodSandbox(t, m, sandboxTemplate{
pod: pod,
createdAt: fakeCreatedAt,
state: runtimeApi.PodSandboxState_SANDBOX_READY,
state: runtimeapi.PodSandboxState_SANDBOX_READY,
})
var containers []*apitest.FakeContainer
@ -95,7 +95,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *
pod: pod,
container: c,
createdAt: fakeCreatedAt,
state: runtimeApi.ContainerState_CONTAINER_RUNNING,
state: runtimeapi.ContainerState_CONTAINER_RUNNING,
}
}
for i := range pod.Spec.Containers {
@ -117,12 +117,12 @@ func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template san
podSandboxID := apitest.BuildSandboxName(config.Metadata)
return &apitest.FakePodSandbox{
PodSandboxStatus: runtimeApi.PodSandboxStatus{
PodSandboxStatus: runtimeapi.PodSandboxStatus{
Id: &podSandboxID,
Metadata: config.Metadata,
State: &template.state,
CreatedAt: &template.createdAt,
Network: &runtimeApi.PodSandboxNetworkStatus{
Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: &apitest.FakePodSandboxIP,
},
Labels: config.Labels,
@ -152,7 +152,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID)
imageRef := containerConfig.Image.GetImage()
return &apitest.FakeContainer{
ContainerStatus: runtimeApi.ContainerStatus{
ContainerStatus: runtimeapi.ContainerStatus{
Id: &containerID,
Metadata: containerConfig.Metadata,
Image: containerConfig.Image,
@ -321,7 +321,7 @@ func TestGetPods(t *testing.T) {
containers := make([]*kubecontainer.Container, len(fakeContainers))
for i := range containers {
fakeContainer := fakeContainers[i]
c, err := m.toKubeContainer(&runtimeApi.Container{
c, err := m.toKubeContainer(&runtimeapi.Container{
Id: fakeContainer.Id,
Metadata: fakeContainer.Metadata,
State: fakeContainer.State,
@ -336,7 +336,7 @@ func TestGetPods(t *testing.T) {
containers[i] = c
}
// Convert fakeSandbox to kubecontainer.Container
sandbox, err := m.sandboxToKubeContainer(&runtimeApi.PodSandbox{
sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
Id: fakeSandbox.Id,
Metadata: fakeSandbox.Metadata,
State: fakeSandbox.State,
@ -393,7 +393,7 @@ func TestGetPodContainerID(t *testing.T) {
fakeSandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod)
// Convert fakeSandbox to kubecontainer.Container
sandbox, err := m.sandboxToKubeContainer(&runtimeApi.PodSandbox{
sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
Id: fakeSandbox.Id,
Metadata: fakeSandbox.Metadata,
State: fakeSandbox.State,
@ -476,7 +476,7 @@ func TestKillPod(t *testing.T) {
containers := make([]*kubecontainer.Container, len(fakeContainers))
for i := range containers {
fakeContainer := fakeContainers[i]
c, err := m.toKubeContainer(&runtimeApi.Container{
c, err := m.toKubeContainer(&runtimeapi.Container{
Id: fakeContainer.Id,
Metadata: fakeContainer.Metadata,
State: fakeContainer.State,
@ -509,10 +509,10 @@ func TestKillPod(t *testing.T) {
assert.Equal(t, 2, len(fakeRuntime.Containers))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeApi.PodSandboxState_SANDBOX_NOTREADY, sandbox.GetState())
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.GetState())
}
for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeApi.ContainerState_CONTAINER_EXITED, c.GetState())
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.GetState())
}
}
@ -550,10 +550,10 @@ func TestSyncPod(t *testing.T) {
assert.Equal(t, 2, len(fakeImage.Images))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeApi.PodSandboxState_SANDBOX_READY, sandbox.GetState())
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.GetState())
}
for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeApi.ContainerState_CONTAINER_RUNNING, c.GetState())
assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.GetState())
}
}
@ -575,11 +575,11 @@ func TestPruneInitContainers(t *testing.T) {
}
templates := []containerTemplate{
{pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeApi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeApi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeApi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeApi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeApi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
}
fakes := makeFakeContainers(t, m, templates)
fakeRuntime.SetFakeContainers(fakes)
@ -633,12 +633,12 @@ func TestSyncPodWithInitContainers(t *testing.T) {
// and container with default attempt number 0.
buildContainerID := func(pod *v1.Pod, container v1.Container) string {
uid := string(pod.UID)
sandboxID := apitest.BuildSandboxName(&runtimeApi.PodSandboxMetadata{
sandboxID := apitest.BuildSandboxName(&runtimeapi.PodSandboxMetadata{
Name: &pod.Name,
Uid: &uid,
Namespace: &pod.Namespace,
})
return apitest.BuildContainerName(&runtimeApi.ContainerMetadata{Name: &container.Name}, sandboxID)
return apitest.BuildContainerName(&runtimeapi.ContainerMetadata{Name: &container.Name}, sandboxID)
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)

View File

@ -24,7 +24,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
@ -59,12 +59,12 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeApi.PodSandboxConfig, error) {
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeApi.PodSandboxConfig{
Metadata: &runtimeApi.PodSandboxMetadata{
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: &pod.Name,
Namespace: &pod.Namespace,
Uid: &podUID,
@ -79,7 +79,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = &runtimeApi.DNSConfig{
podSandboxConfig.DnsConfig = &runtimeapi.DNSConfig{
Servers: dnsServers,
Searches: dnsSearches,
Options: defaultDNSOptions,
@ -96,7 +96,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
podSandboxConfig.LogDirectory = &logDir
cgroupParent := ""
portMappings := []*runtimeApi.PortMapping{}
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
// TODO: use a separate interface to only generate portmappings
opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, &c, "")
@ -109,7 +109,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeApi.PortMapping{
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: &port.HostIP,
HostPort: &hostPort,
ContainerPort: &containerPort,
@ -129,19 +129,19 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeApi.LinuxPodSandboxConfig {
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeapi.LinuxPodSandboxConfig {
if pod.Spec.SecurityContext == nil && cgroupParent == "" {
return nil
}
lc := &runtimeApi.LinuxPodSandboxConfig{}
lc := &runtimeapi.LinuxPodSandboxConfig{}
if cgroupParent != "" {
lc.CgroupParent = &cgroupParent
}
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
lc.SecurityContext = &runtimeApi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeApi.NamespaceOption{
lc.SecurityContext = &runtimeapi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeapi.NamespaceOption{
HostNetwork: &pod.Spec.HostNetwork,
HostIpc: &pod.Spec.HostIPC,
HostPid: &pod.Spec.HostPID,
@ -159,7 +159,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, c
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, sc.SupplementalGroups...)
}
if sc.SELinuxOptions != nil {
lc.SecurityContext.SelinuxOptions = &runtimeApi.SELinuxOption{
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: &sc.SELinuxOptions.User,
Role: &sc.SELinuxOptions.Role,
Type: &sc.SELinuxOptions.Type,
@ -172,11 +172,11 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, c
}
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeApi.PodSandbox, error) {
var filter *runtimeApi.PodSandboxFilter
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi.PodSandbox, error) {
var filter *runtimeapi.PodSandboxFilter
if !all {
readyState := runtimeApi.PodSandboxState_SANDBOX_READY
filter = &runtimeApi.PodSandboxFilter{
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
filter = &runtimeapi.PodSandboxFilter{
State: &readyState,
}
}
@ -187,7 +187,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeApi
return nil, err
}
result := []*runtimeApi.PodSandbox{}
result := []*runtimeapi.PodSandbox{}
for _, s := range resp {
if !isManagedByKubelet(s.Labels) {
glog.V(5).Infof("Sandbox %s is not managed by kubelet", kubecontainer.BuildPodFullName(
@ -202,7 +202,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeApi
}
// determinePodSandboxIP determines the IP address of the given pod sandbox.
func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeApi.PodSandboxStatus) string {
func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) string {
if podSandbox.Network == nil {
glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
return ""
@ -217,8 +217,8 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName
// getSandboxIDByPodUID gets the sandbox IDs by podUID and returns ([]sandboxID, error).
// Param state may be nil in order to get all sandboxes belonging to the same pod.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeApi.PodSandboxState) ([]string, error) {
filter := &runtimeApi.PodSandboxFilter{
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
filter := &runtimeapi.PodSandboxFilter{
State: state,
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
}
@ -252,7 +252,7 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string,
return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
}
// TODO: Port is unused for now, but we may need it in the future.
req := &runtimeApi.PortForwardRequest{
req := &runtimeapi.PortForwardRequest{
PodSandboxId: &sandboxIDs[0],
}
resp, err := m.runtimeService.PortForward(req)
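
generatePodSandboxConfig earlier in this file copies each v1 container port into a CRI PortMapping, narrowing to int32 and taking addresses for the pointer fields. A hedged stand-alone sketch of that conversion, with local stand-ins for both the v1 and runtimeapi types:

package main

import "fmt"

// Local stand-ins: v1-style value ports and CRI-style pointer mappings.
type ContainerPort struct {
	HostIP        string
	HostPort      int
	ContainerPort int
	Protocol      string
}

type PortMapping struct {
	HostIp        *string
	HostPort      *int32
	ContainerPort *int32
	Protocol      *string
}

func toPortMappings(ports []ContainerPort) []*PortMapping {
	mappings := []*PortMapping{}
	for i := range ports {
		p := ports[i] // per-iteration copy so the taken addresses stay stable
		hostPort := int32(p.HostPort)
		containerPort := int32(p.ContainerPort)
		mappings = append(mappings, &PortMapping{
			HostIp:        &p.HostIP,
			HostPort:      &hostPort,
			ContainerPort: &containerPort,
			Protocol:      &p.Protocol,
		})
	}
	return mappings
}

func main() {
	out := toPortMappings([]ContainerPort{{HostIP: "0.0.0.0", HostPort: 8080, ContainerPort: 80, Protocol: "TCP"}})
	fmt.Println(*out[0].HostPort, *out[0].ContainerPort)
}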

View File

@ -23,7 +23,7 @@ import (
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
@ -57,7 +57,7 @@ func TestCreatePodSandbox(t *testing.T) {
id, _, err := m.createPodSandbox(pod, 1)
assert.NoError(t, err)
fakeRuntime.AssertCalls([]string{"RunPodSandbox"})
sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeApi.PodSandboxFilter{Id: &id})
sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: &id})
assert.NoError(t, err)
assert.Equal(t, len(sandboxes), 1)
// TODO Check pod sandbox configuration

View File

@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package remote containers gRPC implementation of internalApi.RuntimeService
// and internalApi.ImageManagerService.
// Package remote contains the gRPC implementation of internalapi.RuntimeService
// and internalapi.ImageManagerService.
package remote
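
For reference, the alias style being enforced across these files follows the Go convention that renamed imports look like package names: all lowercase, with no mixedCaps or underscores. A minimal runnable sketch using only the standard library (the kubelet packages themselves need the full repo on GOPATH):

package main

import (
	"fmt"

	// Good: aliases styled like package names (all lowercase).
	cryptorand "crypto/rand"
	mathrand "math/rand"
	// Discouraged by convention: mixedCaps aliases such as
	// cryptoRand "crypto/rand" or runtimeApi "...", which is what
	// this commit renames throughout.
)

func main() {
	b := make([]byte, 4)
	if _, err := cryptorand.Read(b); err != nil {
		panic(err)
	}
	fmt.Println(b, mathrand.Intn(10))
}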

View File

@ -21,18 +21,18 @@ import (
"github.com/golang/glog"
"google.golang.org/grpc"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
// RemoteImageService is a gRPC implementation of internalApi.ImageManagerService.
// RemoteImageService is a gRPC implementation of internalapi.ImageManagerService.
type RemoteImageService struct {
timeout time.Duration
imageClient runtimeApi.ImageServiceClient
imageClient runtimeapi.ImageServiceClient
}
// NewRemoteImageService creates a new internalApi.ImageManagerService.
func NewRemoteImageService(addr string, connectionTimeout time.Duration) (internalApi.ImageManagerService, error) {
// NewRemoteImageService creates a new internalapi.ImageManagerService.
func NewRemoteImageService(addr string, connectionTimeout time.Duration) (internalapi.ImageManagerService, error) {
glog.V(3).Infof("Connecting to image service %s", addr)
conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimeout), grpc.WithDialer(dial))
if err != nil {
@ -42,16 +42,16 @@ func NewRemoteImageService(addr string, connectionTimout time.Duration) (interna
return &RemoteImageService{
timeout: connectionTimeout,
imageClient: runtimeApi.NewImageServiceClient(conn),
imageClient: runtimeapi.NewImageServiceClient(conn),
}, nil
}
// ListImages lists available images.
func (r *RemoteImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) {
func (r *RemoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.imageClient.ListImages(ctx, &runtimeApi.ListImagesRequest{
resp, err := r.imageClient.ListImages(ctx, &runtimeapi.ListImagesRequest{
Filter: filter,
})
if err != nil {
@ -63,11 +63,11 @@ func (r *RemoteImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runt
}
// ImageStatus returns the status of the image.
func (r *RemoteImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) {
func (r *RemoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.imageClient.ImageStatus(ctx, &runtimeApi.ImageStatusRequest{
resp, err := r.imageClient.ImageStatus(ctx, &runtimeapi.ImageStatusRequest{
Image: image,
})
if err != nil {
@ -79,11 +79,11 @@ func (r *RemoteImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeA
}
// PullImage pulls an image with authentication config.
func (r *RemoteImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error {
func (r *RemoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
_, err := r.imageClient.PullImage(ctx, &runtimeApi.PullImageRequest{
_, err := r.imageClient.PullImage(ctx, &runtimeapi.PullImageRequest{
Image: image,
Auth: auth,
})
@ -96,11 +96,11 @@ func (r *RemoteImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtim
}
// RemoveImage removes the image.
func (r *RemoteImageService) RemoveImage(image *runtimeApi.ImageSpec) error {
func (r *RemoteImageService) RemoveImage(image *runtimeapi.ImageSpec) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
_, err := r.imageClient.RemoveImage(ctx, &runtimeApi.RemoveImageRequest{
_, err := r.imageClient.RemoveImage(ctx, &runtimeapi.RemoveImageRequest{
Image: image,
})
if err != nil {

View File

@ -23,19 +23,19 @@ import (
"github.com/golang/glog"
"google.golang.org/grpc"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
utilexec "k8s.io/kubernetes/pkg/util/exec"
)
// RemoteRuntimeService is a gRPC implementation of internalApi.RuntimeService.
// RemoteRuntimeService is a gRPC implementation of internalapi.RuntimeService.
type RemoteRuntimeService struct {
timeout time.Duration
runtimeClient runtimeApi.RuntimeServiceClient
runtimeClient runtimeapi.RuntimeServiceClient
}
// NewRemoteRuntimeService creates a new internalApi.RuntimeService.
func NewRemoteRuntimeService(addr string, connectionTimeout time.Duration) (internalApi.RuntimeService, error) {
// NewRemoteRuntimeService creates a new internalapi.RuntimeService.
func NewRemoteRuntimeService(addr string, connectionTimeout time.Duration) (internalapi.RuntimeService, error) {
glog.Infof("Connecting to runtime service %s", addr)
conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimeout), grpc.WithDialer(dial))
if err != nil {
@ -45,16 +45,16 @@ func NewRemoteRuntimeService(addr string, connectionTimout time.Duration) (inter
return &RemoteRuntimeService{
timeout: connectionTimeout,
runtimeClient: runtimeApi.NewRuntimeServiceClient(conn),
runtimeClient: runtimeapi.NewRuntimeServiceClient(conn),
}, nil
}
// Version returns the runtime name, runtime version and runtime API version.
func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResponse, error) {
func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
typedVersion, err := r.runtimeClient.Version(ctx, &runtimeApi.VersionRequest{
typedVersion, err := r.runtimeClient.Version(ctx, &runtimeapi.VersionRequest{
Version: &apiVersion,
})
if err != nil {
@ -67,11 +67,11 @@ func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeApi.VersionRe
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state.
func (r *RemoteRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) {
func (r *RemoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.runtimeClient.RunPodSandbox(ctx, &runtimeApi.RunPodSandboxRequest{
resp, err := r.runtimeClient.RunPodSandbox(ctx, &runtimeapi.RunPodSandboxRequest{
Config: config,
})
if err != nil {
@ -88,7 +88,7 @@ func (r *RemoteRuntimeService) StopPodSandbox(podSandBoxID string) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
_, err := r.runtimeClient.StopPodSandbox(ctx, &runtimeApi.StopPodSandboxRequest{
_, err := r.runtimeClient.StopPodSandbox(ctx, &runtimeapi.StopPodSandboxRequest{
PodSandboxId: &podSandBoxID,
})
if err != nil {
@ -105,7 +105,7 @@ func (r *RemoteRuntimeService) RemovePodSandbox(podSandBoxID string) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
_, err := r.runtimeClient.RemovePodSandbox(ctx, &runtimeApi.RemovePodSandboxRequest{
_, err := r.runtimeClient.RemovePodSandbox(ctx, &runtimeapi.RemovePodSandboxRequest{
PodSandboxId: &podSandBoxID,
})
if err != nil {
@ -117,11 +117,11 @@ func (r *RemoteRuntimeService) RemovePodSandbox(podSandBoxID string) error {
}
// PodSandboxStatus returns the status of the PodSandbox.
func (r *RemoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeApi.PodSandboxStatus, error) {
func (r *RemoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeapi.PodSandboxStatus, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.runtimeClient.PodSandboxStatus(ctx, &runtimeApi.PodSandboxStatusRequest{
resp, err := r.runtimeClient.PodSandboxStatus(ctx, &runtimeapi.PodSandboxStatusRequest{
PodSandboxId: &podSandBoxID,
})
if err != nil {
@ -133,11 +133,11 @@ func (r *RemoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeAp
}
// ListPodSandbox returns a list of PodSandboxes.
func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) {
func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.runtimeClient.ListPodSandbox(ctx, &runtimeApi.ListPodSandboxRequest{
resp, err := r.runtimeClient.ListPodSandbox(ctx, &runtimeapi.ListPodSandboxRequest{
Filter: filter,
})
if err != nil {
@ -149,11 +149,11 @@ func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilte
}
// CreateContainer creates a new container in the specified PodSandbox.
func (r *RemoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) {
func (r *RemoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.runtimeClient.CreateContainer(ctx, &runtimeApi.CreateContainerRequest{
resp, err := r.runtimeClient.CreateContainer(ctx, &runtimeapi.CreateContainerRequest{
PodSandboxId: &podSandBoxID,
Config: config,
SandboxConfig: sandboxConfig,
@ -171,7 +171,7 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
_, err := r.runtimeClient.StartContainer(ctx, &runtimeApi.StartContainerRequest{
_, err := r.runtimeClient.StartContainer(ctx, &runtimeapi.StartContainerRequest{
ContainerId: &containerID,
})
if err != nil {
@ -187,7 +187,7 @@ func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64)
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
_, err := r.runtimeClient.StopContainer(ctx, &runtimeApi.StopContainerRequest{
_, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{
ContainerId: &containerID,
Timeout: &timeout,
})
@ -205,7 +205,7 @@ func (r *RemoteRuntimeService) RemoveContainer(containerID string) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
_, err := r.runtimeClient.RemoveContainer(ctx, &runtimeApi.RemoveContainerRequest{
_, err := r.runtimeClient.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{
ContainerId: &containerID,
})
if err != nil {
@ -217,11 +217,11 @@ func (r *RemoteRuntimeService) RemoveContainer(containerID string) error {
}
// ListContainers lists containers by filters.
func (r *RemoteRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
func (r *RemoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.runtimeClient.ListContainers(ctx, &runtimeApi.ListContainersRequest{
resp, err := r.runtimeClient.ListContainers(ctx, &runtimeapi.ListContainersRequest{
Filter: filter,
})
if err != nil {
@ -233,11 +233,11 @@ func (r *RemoteRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter
}
// ContainerStatus returns the container status.
func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) {
func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeApi.ContainerStatusRequest{
resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{
ContainerId: &containerID,
})
if err != nil {
@ -255,7 +255,7 @@ func (r *RemoteRuntimeService) ExecSync(containerID string, cmd []string, timeou
defer cancel()
timeoutSeconds := int64(timeout.Seconds())
req := &runtimeApi.ExecSyncRequest{
req := &runtimeapi.ExecSyncRequest{
ContainerId: &containerID,
Cmd: cmd,
Timeout: &timeoutSeconds,
@ -278,7 +278,7 @@ func (r *RemoteRuntimeService) ExecSync(containerID string, cmd []string, timeou
}
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (r *RemoteRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) {
func (r *RemoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
@ -292,7 +292,7 @@ func (r *RemoteRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.Ex
}
// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
func (r *RemoteRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) {
func (r *RemoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
@ -306,7 +306,7 @@ func (r *RemoteRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeAp
}
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (r *RemoteRuntimeService) PortForward(req *runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) {
func (r *RemoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
@ -322,14 +322,14 @@ func (r *RemoteRuntimeService) PortForward(req *runtimeApi.PortForwardRequest) (
// UpdateRuntimeConfig updates the config of a runtime service. The only
// update payload currently supported is the pod CIDR assigned to a node,
// and the runtime service just proxies it down to the network plugin.
func (r *RemoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) error {
func (r *RemoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
// Response doesn't contain anything of interest. This translates to an
// Event notification to the network plugin, which can't fail, so we're
// really looking to surface destination unreachable.
_, err := r.runtimeClient.UpdateRuntimeConfig(ctx, &runtimeApi.UpdateRuntimeConfigRequest{
_, err := r.runtimeClient.UpdateRuntimeConfig(ctx, &runtimeapi.UpdateRuntimeConfigRequest{
RuntimeConfig: runtimeConfig,
})
@ -341,11 +341,11 @@ func (r *RemoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.Run
}
// Status returns the status of the runtime.
func (r *RemoteRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) {
func (r *RemoteRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel()
resp, err := r.runtimeClient.Status(ctx, &runtimeApi.StatusRequest{})
resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{})
if err != nil {
glog.Errorf("Status from runtime service failed: %v", err)
return nil, err
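
Every hunk in this commit makes the same mechanical change: import aliases move from mixedCaps (runtimeApi) or snake_case (quobyte_api) to the single all-lowercase word that Go style recommends. A minimal hypothetical sketch of the post-change import shape, using paths that appear in the rktshim files below; the blank declarations exist only so the fragment compiles:

package rktshim

import (
	kubeletapi "k8s.io/kubernetes/pkg/kubelet/api"                  // was: kubeletApi
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" // was: runtimeApi
)

// Blank uses of each alias keep the sketch compilable on its own.
var (
	_ kubeletapi.ContainerManager
	_ runtimeapi.ContainerConfig
)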

View File

@ -19,8 +19,8 @@ package rktshim
import (
"time"
kubeletApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubeletapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
// Runtime provides an API for lifecycle, inspection and introspection
@ -31,12 +31,12 @@ type Runtime struct{}
type RuntimeConfig struct{}
// NewRuntime creates a container.Runtime instance using the Runtime.
func NewRuntime(RuntimeConfig) (kubeletApi.ContainerManager, error) {
func NewRuntime(RuntimeConfig) (kubeletapi.ContainerManager, error) {
return &Runtime{}, nil
}
// CreateContainer creates an app inside the provided pod sandbox and returns the RawContainerID.
func (*Runtime) CreateContainer(string, *runtimeApi.ContainerConfig, *runtimeApi.PodSandboxConfig) (string, error) {
func (*Runtime) CreateContainer(string, *runtimeapi.ContainerConfig, *runtimeapi.PodSandboxConfig) (string, error) {
panic("not implemented")
}
@ -56,12 +56,12 @@ func (*Runtime) RemoveContainer(string) error {
}
// ListContainers lists out the apps residing inside the pod sandbox using the ContainerFilter.
func (*Runtime) ListContainers(*runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
func (*Runtime) ListContainers(*runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
panic("not implemented")
}
// ContainerStatus returns the RawContainerStatus of an app inside the pod sandbox.
func (*Runtime) ContainerStatus(string) (*runtimeApi.ContainerStatus, error) {
func (*Runtime) ContainerStatus(string) (*runtimeapi.ContainerStatus, error) {
panic("not implemented")
}
@ -72,11 +72,11 @@ func (*Runtime) ExecSync(containerID string, cmd []string, timeout time.Duration
}
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (*Runtime) Exec(*runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) {
func (*Runtime) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
panic("not implemented")
}
// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
func (*Runtime) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) {
func (*Runtime) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
panic("not implemented")
}

View File

@ -23,8 +23,8 @@ import (
"math/rand"
"time"
kubeletApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubeletapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
)
@ -61,7 +61,7 @@ type FakeRuntime struct {
type FakeRuntimeConfig struct{}
func NewFakeRuntime() (kubeletApi.ContainerManager, error) {
func NewFakeRuntime() (kubeletapi.ContainerManager, error) {
return &FakeRuntime{Containers: make(containerRegistry)}, nil
}
@ -78,23 +78,23 @@ func newCharacterStreams(in io.Reader, out io.Writer, err io.Writer) characterSt
}
type fakeContainer struct {
Config *runtimeApi.ContainerConfig
Config *runtimeapi.ContainerConfig
Status runtimeApi.ContainerStatus
Status runtimeapi.ContainerStatus
State runtimeApi.ContainerState
State runtimeapi.ContainerState
Streams characterStreams
}
func (c *fakeContainer) Start() {
c.State = runtimeApi.ContainerState_CONTAINER_RUNNING
c.State = runtimeapi.ContainerState_CONTAINER_RUNNING
c.Status.State = &c.State
}
func (c *fakeContainer) Stop() {
c.State = runtimeApi.ContainerState_CONTAINER_EXITED
c.State = runtimeapi.ContainerState_CONTAINER_EXITED
c.Status.State = &c.State
@ -115,7 +115,7 @@ func (c *fakeContainer) Exec(cmd []string, in io.Reader, out, err io.WriteCloser
type containerRegistry map[string]*fakeContainer
func (r *FakeRuntime) CreateContainer(pid string, cfg *runtimeApi.ContainerConfig, sandboxCfg *runtimeApi.PodSandboxConfig) (string, error) {
func (r *FakeRuntime) CreateContainer(pid string, cfg *runtimeapi.ContainerConfig, sandboxCfg *runtimeapi.PodSandboxConfig) (string, error) {
// TODO(tmrts): allow customization
containerIDLength := 8
@ -135,11 +135,11 @@ func (r *FakeRuntime) StartContainer(id string) error {
return ErrContainerNotFound
}
switch c.State {
case runtimeApi.ContainerState_CONTAINER_EXITED:
case runtimeapi.ContainerState_CONTAINER_EXITED:
fallthrough
case runtimeApi.ContainerState_CONTAINER_CREATED:
case runtimeapi.ContainerState_CONTAINER_CREATED:
c.Start()
case runtimeApi.ContainerState_CONTAINER_UNKNOWN:
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
// TODO(tmrts): add timeout to Start API or generalize timeout somehow
//<-time.After(time.Duration(timeout) * time.Second)
fallthrough
@ -157,9 +157,9 @@ func (r *FakeRuntime) StopContainer(id string, timeout int64) error {
}
switch c.State {
case runtimeApi.ContainerState_CONTAINER_RUNNING:
c.State = runtimeApi.ContainerState_CONTAINER_EXITED // This state might not be the best one
case runtimeApi.ContainerState_CONTAINER_UNKNOWN:
case runtimeapi.ContainerState_CONTAINER_RUNNING:
c.State = runtimeapi.ContainerState_CONTAINER_EXITED // This state might not be the best one
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
<-time.After(time.Duration(timeout) * time.Second)
fallthrough
default:
@ -181,12 +181,12 @@ func (r *FakeRuntime) RemoveContainer(id string) error {
return nil
}
func (r *FakeRuntime) ListContainers(*runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) {
list := []*runtimeApi.Container{}
func (r *FakeRuntime) ListContainers(*runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
list := []*runtimeapi.Container{}
// TODO(tmrts): apply the filter
for _, c := range r.Containers {
list = append(list, &runtimeApi.Container{
list = append(list, &runtimeapi.Container{
Id: c.Status.Id,
Metadata: c.Config.Metadata,
Labels: c.Config.Labels,
@ -198,10 +198,10 @@ func (r *FakeRuntime) ListContainers(*runtimeApi.ContainerFilter) ([]*runtimeApi
return list, nil
}
func (r *FakeRuntime) ContainerStatus(id string) (*runtimeApi.ContainerStatus, error) {
func (r *FakeRuntime) ContainerStatus(id string) (*runtimeapi.ContainerStatus, error) {
c, ok := r.Containers[id]
if !ok {
return &runtimeApi.ContainerStatus{}, ErrContainerNotFound
return &runtimeapi.ContainerStatus{}, ErrContainerNotFound
}
return &c.Status, nil
@ -214,7 +214,7 @@ func (r *FakeRuntime) ExecSync(containerID string, cmd []string, timeout time.Du
}
// TODO(tmrts): Validate the assumption that container has to be running for exec to work.
if c.State != runtimeApi.ContainerState_CONTAINER_RUNNING {
if c.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
return nil, nil, ErrInvalidContainerStateTransition
}
@ -225,16 +225,16 @@ func (r *FakeRuntime) ExecSync(containerID string, cmd []string, timeout time.Du
return stdoutBuffer.Bytes(), stderrBuffer.Bytes(), err
}
func (r *FakeRuntime) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) {
func (r *FakeRuntime) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
url := "http://" + FakeStreamingHost + ":" + FakeStreamingPort + "/exec/" + req.GetContainerId()
return &runtimeApi.ExecResponse{
return &runtimeapi.ExecResponse{
Url: &url,
}, nil
}
func (r *FakeRuntime) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) {
func (r *FakeRuntime) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
url := "http://" + FakeStreamingHost + ":" + FakeStreamingPort + "/attach/" + req.GetContainerId()
return &runtimeApi.AttachResponse{
return &runtimeapi.AttachResponse{
Url: &url,
}, nil
}

View File

@ -19,7 +19,7 @@ package rktshim
import (
"errors"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
// TODO(tmrts): Move these errors to the container API for code re-use.
@ -41,21 +41,21 @@ func NewImageStore(ImageStoreConfig) (*ImageStore, error) {
}
// List lists the images residing in the image store.
func (*ImageStore) List() ([]runtimeApi.Image, error) {
func (*ImageStore) List() ([]runtimeapi.Image, error) {
panic("not implemented")
}
// Pull pulls an image into the image store and uses the given authentication method.
func (*ImageStore) Pull(runtimeApi.ImageSpec, runtimeApi.AuthConfig, *runtimeApi.PodSandboxConfig) error {
func (*ImageStore) Pull(runtimeapi.ImageSpec, runtimeapi.AuthConfig, *runtimeapi.PodSandboxConfig) error {
panic("not implemented")
}
// Remove removes the image from the image store.
func (*ImageStore) Remove(runtimeApi.ImageSpec) error {
func (*ImageStore) Remove(runtimeapi.ImageSpec) error {
panic("not implemented")
}
// Status returns the status of the image.
func (*ImageStore) Status(runtimeApi.ImageSpec) (runtimeApi.Image, error) {
func (*ImageStore) Status(runtimeapi.ImageSpec) (runtimeapi.Image, error) {
panic("not implemented")
}

View File

@ -21,21 +21,21 @@ import (
"reflect"
"testing"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
var (
emptyImgStoreConfig = ImageStoreConfig{}
// TODO(tmrts): fill the pod configuration
testPodConfig *runtimeApi.PodSandboxConfig = nil
testPodConfig *runtimeapi.PodSandboxConfig = nil
)
type imageTestCase struct {
Spec *runtimeApi.ImageSpec
ExpectedStatus *runtimeApi.Image
Spec *runtimeapi.ImageSpec
ExpectedStatus *runtimeapi.Image
}
func compareContainerImages(got, expected runtimeApi.Image) error {
func compareContainerImages(got, expected runtimeapi.Image) error {
if got.Id != expected.Id {
return fmt.Errorf("mismatching Ids -> expected %q, got %q", expected.Id, got.Id)
}
@ -62,16 +62,16 @@ var (
var testImgSpecs = map[string]imageTestCase{
"non-existent-image": {
&runtimeApi.ImageSpec{
&runtimeapi.ImageSpec{
Image: &gibberishStr,
},
nil,
},
"busybox": {
&runtimeApi.ImageSpec{
&runtimeapi.ImageSpec{
Image: &busyboxStr,
},
&runtimeApi.Image{
&runtimeapi.Image{
Id: nil,
RepoTags: []string{},
RepoDigests: []string{},
@ -80,7 +80,7 @@ var testImgSpecs = map[string]imageTestCase{
},
}
var testAuthConfig = map[string]runtimeApi.AuthConfig{
var testAuthConfig = map[string]runtimeapi.AuthConfig{
"no-auth": {},
}

View File

@ -17,8 +17,8 @@ limitations under the License.
package rktshim
import (
kubeletApi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubeletapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)
// PodSandboxManager provides basic operations to create/delete and examine
@ -29,12 +29,12 @@ type PodSandboxManager struct{}
type PodSandboxManagerConfig struct{}
// NewPodSandboxManager creates a PodSandboxManager.
func NewPodSandboxManager(PodSandboxManagerConfig) (kubeletApi.PodSandboxManager, error) {
func NewPodSandboxManager(PodSandboxManagerConfig) (kubeletapi.PodSandboxManager, error) {
return &PodSandboxManager{}, nil
}
// RunPodSandbox creates and starts a pod sandbox given a pod sandbox configuration.
func (*PodSandboxManager) RunPodSandbox(*runtimeApi.PodSandboxConfig) (string, error) {
func (*PodSandboxManager) RunPodSandbox(*runtimeapi.PodSandboxConfig) (string, error) {
panic("not implemented")
}
@ -49,16 +49,16 @@ func (*PodSandboxManager) RemovePodSandbox(string) error {
}
// PodSandboxStatus queries the status of the pod sandbox.
func (*PodSandboxManager) PodSandboxStatus(string) (*runtimeApi.PodSandboxStatus, error) {
func (*PodSandboxManager) PodSandboxStatus(string) (*runtimeapi.PodSandboxStatus, error) {
panic("not implemented")
}
// ListPodSandbox lists existing sandboxes, filtered by the PodSandboxFilter.
func (*PodSandboxManager) ListPodSandbox(*runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) {
func (*PodSandboxManager) ListPodSandbox(*runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
panic("not implemented")
}
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (*PodSandboxManager) PortForward(*runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) {
func (*PodSandboxManager) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
panic("not implemented")
}

View File

@ -19,7 +19,7 @@ package cache
import (
"time"
expirationCache "k8s.io/kubernetes/pkg/client/cache"
expirationcache "k8s.io/kubernetes/pkg/client/cache"
)
// ObjectCache is a simple wrapper of expiration cache that
@ -27,7 +27,7 @@ import (
// 2. has an updater to get value directly if it is expired
// 3. then update the cache
type ObjectCache struct {
cache expirationCache.Store
cache expirationcache.Store
updater func() (interface{}, error)
}
@ -42,7 +42,7 @@ type objectEntry struct {
func NewObjectCache(f func() (interface{}, error), ttl time.Duration) *ObjectCache {
return &ObjectCache{
updater: f,
cache: expirationCache.NewTTLStore(stringKeyFunc, ttl),
cache: expirationcache.NewTTLStore(stringKeyFunc, ttl),
}
}
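
A hypothetical usage sketch of the read-through TTL cache constructed above: once an entry's TTL lapses, the updater is invoked to fetch a fresh value (expensiveLookup is an assumed placeholder returning (interface{}, error), not part of this package):

// Build a cache whose entries expire after 10s; on expiry, the updater
// below supplies a fresh value that is then stored back into the cache.
c := NewObjectCache(func() (interface{}, error) {
	return expensiveLookup() // hypothetical fetch of the authoritative value
}, 10*time.Second)
_ = c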

View File

@ -21,7 +21,7 @@ import (
"testing"
"time"
expirationCache "k8s.io/kubernetes/pkg/client/cache"
expirationcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/util/clock"
)
@ -32,11 +32,11 @@ type testObject struct {
// A fake ObjectCache for unit tests.
func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock clock.Clock) *ObjectCache {
ttlPolicy := &expirationCache.TTLPolicy{Ttl: ttl, Clock: clock}
ttlPolicy := &expirationcache.TTLPolicy{Ttl: ttl, Clock: clock}
deleteChan := make(chan string, 1)
return &ObjectCache{
updater: f,
cache: expirationCache.NewFakeExpirationStore(stringKeyFunc, deleteChan, ttlPolicy, clock),
cache: expirationcache.NewFakeExpirationStore(stringKeyFunc, deleteChan, ttlPolicy, clock),
}
}
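
Injecting the clock is what lets tests expire entries deterministically instead of sleeping; a sketch, assuming pkg/util/clock exposes NewFakeClock and Step as it did at the time:

fc := clock.NewFakeClock(time.Now())
c := NewFakeObjectCache(func() (interface{}, error) { return "fresh", nil }, time.Second, fc)
fc.Step(2 * time.Second) // advance past the TTL; the next read falls through to the updater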

View File

@ -30,7 +30,7 @@ import (
"syscall"
"github.com/golang/glog"
utilExec "k8s.io/kubernetes/pkg/util/exec"
utilexec "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/sets"
)
@ -332,9 +332,9 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
cmd := mounter.Runner.Command("fsck", args...)
out, err := cmd.CombinedOutput()
if err != nil {
ee, isExitError := err.(utilExec.ExitError)
ee, isExitError := err.(utilexec.ExitError)
switch {
case err == utilExec.ErrExecutableNotFound:
case err == utilexec.ErrExecutableNotFound:
glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
glog.Infof("Device %s has errors which were corrected by fsck.", source)
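
For context, the constant checked above follows the conventional fsck(8) exit-status bits from util-linux; a reference sketch of the values this code appears to assume (treating "errors corrected" as safe to continue and anything worse as fatal):

// Conventional fsck(8) exit statuses (util-linux).
const (
	fsckNoErrors           = 0
	fsckErrorsCorrected    = 1
	fsckSystemShouldReboot = 2
	fsckErrorsUncorrected  = 4
)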

View File

@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
flockerApi "github.com/clusterhq/flocker-go"
flockerapi "github.com/clusterhq/flocker-go"
)
// This is the primary entrypoint for volume plugins.
@ -50,7 +50,7 @@ type flockerVolume struct {
// dataset uuid
datasetUUID string
//pod *v1.Pod
flockerClient flockerApi.Clientable
flockerClient flockerapi.Clientable
manager volumeManager
plugin *flockerPlugin
mounter mount.Interface
@ -229,7 +229,7 @@ func (b *flockerVolumeMounter) SetUp(fsGroup *int64) error {
// newFlockerClient uses environment variables and pod attributes to return a
// flocker client capable of talking with the Flocker control service.
func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerApi.Client, error) {
func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerapi.Client, error) {
host := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_HOST", defaultHost)
port, err := env.GetEnvAsIntOrFallback("FLOCKER_CONTROL_SERVICE_PORT", defaultPort)
@ -240,11 +240,11 @@ func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerApi.Client, err
keyPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE", defaultClientKeyFile)
certPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE", defaultClientCertFile)
c, err := flockerApi.NewClient(host, port, hostIP, caCertPath, keyPath, certPath)
c, err := flockerapi.NewClient(host, port, hostIP, caCertPath, keyPath, certPath)
return c, err
}
func (b *flockerVolumeMounter) newFlockerClient() (*flockerApi.Client, error) {
func (b *flockerVolumeMounter) newFlockerClient() (*flockerapi.Client, error) {
hostIP, err := b.plugin.host.GetHostIP()
if err != nil {

View File

@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
flockerApi "github.com/clusterhq/flocker-go"
flockerapi "github.com/clusterhq/flocker-go"
"github.com/stretchr/testify/assert"
)
@ -37,14 +37,14 @@ const datasetOneID = "11111111-1111-1111-1111-111111111100"
const nodeOneID = "11111111-1111-1111-1111-111111111111"
const nodeTwoID = "22222222-2222-2222-2222-222222222222"
var _ flockerApi.Clientable = &fakeFlockerClient{}
var _ flockerapi.Clientable = &fakeFlockerClient{}
type fakeFlockerClient struct {
DatasetID string
Primary string
Deleted bool
Metadata map[string]string
Nodes []flockerApi.NodeState
Nodes []flockerapi.NodeState
Error error
}
@ -54,7 +54,7 @@ func newFakeFlockerClient() *fakeFlockerClient {
Primary: nodeOneID,
Deleted: false,
Metadata: map[string]string{"Name": "dataset-one"},
Nodes: []flockerApi.NodeState{
Nodes: []flockerapi.NodeState{
{
Host: "1.2.3.4",
UUID: nodeOneID,
@ -67,13 +67,13 @@ func newFakeFlockerClient() *fakeFlockerClient {
}
}
func (c *fakeFlockerClient) CreateDataset(options *flockerApi.CreateDatasetOptions) (*flockerApi.DatasetState, error) {
func (c *fakeFlockerClient) CreateDataset(options *flockerapi.CreateDatasetOptions) (*flockerapi.DatasetState, error) {
if c.Error != nil {
return nil, c.Error
}
return &flockerApi.DatasetState{
return &flockerapi.DatasetState{
DatasetID: c.DatasetID,
}, nil
}
@ -84,8 +84,8 @@ func (c *fakeFlockerClient) DeleteDataset(datasetID string) error {
return nil
}
func (c *fakeFlockerClient) GetDatasetState(datasetID string) (*flockerApi.DatasetState, error) {
return &flockerApi.DatasetState{}, nil
func (c *fakeFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
return &flockerapi.DatasetState{}, nil
}
func (c *fakeFlockerClient) GetDatasetID(metaName string) (datasetID string, err error) {
@ -99,12 +99,12 @@ func (c *fakeFlockerClient) GetPrimaryUUID() (primaryUUID string, err error) {
return
}
func (c *fakeFlockerClient) ListNodes() (nodes []flockerApi.NodeState, err error) {
func (c *fakeFlockerClient) ListNodes() (nodes []flockerapi.NodeState, err error) {
return c.Nodes, nil
}
func (c *fakeFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerApi.DatasetState, error) {
return &flockerApi.DatasetState{}, nil
func (c *fakeFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
return &flockerapi.DatasetState{}, nil
}
type fakeFlockerUtil struct {
@ -301,7 +301,7 @@ func TestIsReadOnly(t *testing.T) {
type mockFlockerClient struct {
datasetID, primaryUUID, path string
datasetState *flockerApi.DatasetState
datasetState *flockerapi.DatasetState
}
func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mockFlockerClient {
@ -309,7 +309,7 @@ func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mock
datasetID: mockDatasetID,
primaryUUID: mockPrimaryUUID,
path: mockPath,
datasetState: &flockerApi.DatasetState{
datasetState: &flockerapi.DatasetState{
Path: mockPath,
DatasetID: mockDatasetID,
Primary: mockPrimaryUUID,
@ -317,10 +317,10 @@ func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mock
}
}
func (m mockFlockerClient) CreateDataset(metaName string) (*flockerApi.DatasetState, error) {
func (m mockFlockerClient) CreateDataset(metaName string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil
}
func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerApi.DatasetState, error) {
func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil
}
func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
@ -329,7 +329,7 @@ func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
func (m mockFlockerClient) GetPrimaryUUID() (string, error) {
return m.primaryUUID, nil
}
func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerApi.DatasetState, error) {
func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil
}
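
The var _ flockerapi.Clientable = &fakeFlockerClient{} line at the top of this file is Go's compile-time interface check; a self-contained sketch of the idiom with throwaway names:

package main

type greeter interface{ Greet() string }

type fakeGreeter struct{}

func (fakeGreeter) Greet() string { return "hi" }

// Fails to compile if fakeGreeter ever stops satisfying greeter,
// exactly like the Clientable assertion above.
var _ greeter = fakeGreeter{}

func main() {}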

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/util/rand"
"k8s.io/kubernetes/pkg/volume"
flockerApi "github.com/clusterhq/flocker-go"
flockerapi "github.com/clusterhq/flocker-go"
"github.com/golang/glog"
)
@ -75,7 +75,7 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID
requestBytes := capacity.Value()
volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
createOptions := &flockerApi.CreateDatasetOptions{
createOptions := &flockerapi.CreateDatasetOptions{
MaximumSize: requestBytes,
Metadata: map[string]string{
"type": "k8s-dynamic-prov",

View File

@ -20,7 +20,7 @@ import (
"fmt"
"os"
"path"
goStrings "strings"
gostrings "strings"
"github.com/golang/glog"
"github.com/pborman/uuid"
@ -358,7 +358,7 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume,
return nil, err
}
for k, v := range provisioner.options.Parameters {
switch goStrings.ToLower(k) {
switch gostrings.ToLower(k) {
case "registry":
provisioner.registry = v
case "user":
@ -434,7 +434,7 @@ func parseAPIConfig(plugin *quobytePlugin, params map[string]string) (*quobyteAP
deleteKeys := []string{}
for k, v := range params {
switch goStrings.ToLower(k) {
switch gostrings.ToLower(k) {
case "adminsecretname":
secretName = v
deleteKeys = append(deleteKeys, k)

View File

@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
"github.com/golang/glog"
quobyte_api "github.com/quobyte/api"
quobyteapi "github.com/quobyte/api"
)
type quobyteVolumeManager struct {
@ -37,7 +37,7 @@ func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProv
volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
// Quobyte has the concept of Volumes, which don't have a specific size (they can grow unlimited);
// to simulate a size constraint we could set a quota here.
volumeRequest := &quobyte_api.CreateVolumeRequest{
volumeRequest := &quobyteapi.CreateVolumeRequest{
Name: provisioner.volume,
RootUserID: provisioner.user,
RootGroupID: provisioner.group,
@ -62,8 +62,8 @@ func (manager *quobyteVolumeManager) deleteVolume(deleter *quobyteVolumeDeleter)
return manager.createQuobyteClient().DeleteVolumeByName(deleter.volume, deleter.tenant)
}
func (manager *quobyteVolumeManager) createQuobyteClient() *quobyte_api.QuobyteClient {
return quobyte_api.NewQuobyteClient(
func (manager *quobyteVolumeManager) createQuobyteClient() *quobyteapi.QuobyteClient {
return quobyteapi.NewQuobyteClient(
manager.config.quobyteAPIServer,
manager.config.quobyteUser,
manager.config.quobytePassword,

Some files were not shown because too many files have changed in this diff.