Merge pull request #35258 from feiskyer/package-aliase

Automatic merge from submit-queue

Fix package aliases to follow golang convention

Some package aliases do not align with the golang convention https://blog.golang.org/package-names. This PR fixes them. Also adds a verify script and presubmit checks.

Fixes #35070.

cc/ @timstclair @Random-Liu
This commit is contained in:
Kubernetes Submit Queue
2016-11-30 16:39:46 -08:00
committed by GitHub
108 changed files with 1364 additions and 1318 deletions

View File

@@ -33,7 +33,7 @@ import (
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kdns "k8s.io/kubernetes/pkg/dns" kdns "k8s.io/kubernetes/pkg/dns"
dnsConfig "k8s.io/kubernetes/pkg/dns/config" dnsconfig "k8s.io/kubernetes/pkg/dns/config"
"k8s.io/kubernetes/pkg/runtime/schema" "k8s.io/kubernetes/pkg/runtime/schema"
) )
@@ -58,15 +58,15 @@ func NewKubeDNSServerDefault(config *options.KubeDNSConfig) *KubeDNSServer {
ks.dnsBindAddress = config.DNSBindAddress ks.dnsBindAddress = config.DNSBindAddress
ks.dnsPort = config.DNSPort ks.dnsPort = config.DNSPort
var configSync dnsConfig.Sync var configSync dnsconfig.Sync
if config.ConfigMap == "" { if config.ConfigMap == "" {
glog.V(0).Infof("ConfigMap not configured, using values from command line flags") glog.V(0).Infof("ConfigMap not configured, using values from command line flags")
configSync = dnsConfig.NewNopSync( configSync = dnsconfig.NewNopSync(
&dnsConfig.Config{Federations: config.Federations}) &dnsconfig.Config{Federations: config.Federations})
} else { } else {
glog.V(0).Infof("Using configuration read from ConfigMap: %v:%v", glog.V(0).Infof("Using configuration read from ConfigMap: %v:%v",
config.ConfigMapNs, config.ConfigMap) config.ConfigMapNs, config.ConfigMap)
configSync = dnsConfig.NewSync( configSync = dnsconfig.NewSync(
kubeClient, config.ConfigMapNs, config.ConfigMap) kubeClient, config.ConfigMapNs, config.ConfigMap)
} }

View File

@@ -19,13 +19,13 @@ package cache
import ( import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/federation/apis/federation/v1beta1" "k8s.io/kubernetes/federation/apis/federation/v1beta1"
kubeCache "k8s.io/kubernetes/pkg/client/cache" kubecache "k8s.io/kubernetes/pkg/client/cache"
) )
// StoreToClusterLister makes a Store have the List method of the unversioned.ClusterInterface // StoreToClusterLister makes a Store have the List method of the unversioned.ClusterInterface
// The Store must contain (only) clusters. // The Store must contain (only) clusters.
type StoreToClusterLister struct { type StoreToClusterLister struct {
kubeCache.Store kubecache.Store
} }
func (s *StoreToClusterLister) List() (clusters v1beta1.ClusterList, err error) { func (s *StoreToClusterLister) List() (clusters v1beta1.ClusterList, err error) {
@@ -41,7 +41,7 @@ type ClusterConditionPredicate func(cluster v1beta1.Cluster) bool
// storeToClusterConditionLister filters and returns nodes matching the given type and status from the store. // storeToClusterConditionLister filters and returns nodes matching the given type and status from the store.
type storeToClusterConditionLister struct { type storeToClusterConditionLister struct {
store kubeCache.Store store kubecache.Store
predicate ClusterConditionPredicate predicate ClusterConditionPredicate
} }

View File

@@ -21,8 +21,8 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
cluster_cache "k8s.io/kubernetes/federation/client/cache" clustercache "k8s.io/kubernetes/federation/client/cache"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
@@ -43,14 +43,14 @@ type ClusterController struct {
// clusterMonitorPeriod is the period for updating status of cluster // clusterMonitorPeriod is the period for updating status of cluster
clusterMonitorPeriod time.Duration clusterMonitorPeriod time.Duration
// clusterClusterStatusMap is a mapping of clusterName and cluster status of last sampling // clusterClusterStatusMap is a mapping of clusterName and cluster status of last sampling
clusterClusterStatusMap map[string]federation_v1beta1.ClusterStatus clusterClusterStatusMap map[string]federationv1beta1.ClusterStatus
// clusterKubeClientMap is a mapping of clusterName and restclient // clusterKubeClientMap is a mapping of clusterName and restclient
clusterKubeClientMap map[string]ClusterClient clusterKubeClientMap map[string]ClusterClient
// cluster framework and store // cluster framework and store
clusterController *cache.Controller clusterController *cache.Controller
clusterStore cluster_cache.StoreToClusterLister clusterStore clustercache.StoreToClusterLister
} }
// NewclusterController returns a new cluster controller // NewclusterController returns a new cluster controller
@@ -59,7 +59,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
knownClusterSet: make(sets.String), knownClusterSet: make(sets.String),
federationClient: federationClient, federationClient: federationClient,
clusterMonitorPeriod: clusterMonitorPeriod, clusterMonitorPeriod: clusterMonitorPeriod,
clusterClusterStatusMap: make(map[string]federation_v1beta1.ClusterStatus), clusterClusterStatusMap: make(map[string]federationv1beta1.ClusterStatus),
clusterKubeClientMap: make(map[string]ClusterClient), clusterKubeClientMap: make(map[string]ClusterClient),
} }
cc.clusterStore.Store, cc.clusterController = cache.NewInformer( cc.clusterStore.Store, cc.clusterController = cache.NewInformer(
@@ -71,7 +71,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
return cc.federationClient.Federation().Clusters().Watch(options) return cc.federationClient.Federation().Clusters().Watch(options)
}, },
}, },
&federation_v1beta1.Cluster{}, &federationv1beta1.Cluster{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: cc.delFromClusterSet, DeleteFunc: cc.delFromClusterSet,
@@ -84,7 +84,7 @@ func NewclusterController(federationClient federationclientset.Interface, cluste
// delFromClusterSet delete a cluster from clusterSet and // delFromClusterSet delete a cluster from clusterSet and
// delete the corresponding restclient from the map clusterKubeClientMap // delete the corresponding restclient from the map clusterKubeClientMap
func (cc *ClusterController) delFromClusterSet(obj interface{}) { func (cc *ClusterController) delFromClusterSet(obj interface{}) {
cluster := obj.(*federation_v1beta1.Cluster) cluster := obj.(*federationv1beta1.Cluster)
cc.knownClusterSet.Delete(cluster.Name) cc.knownClusterSet.Delete(cluster.Name)
delete(cc.clusterKubeClientMap, cluster.Name) delete(cc.clusterKubeClientMap, cluster.Name)
} }
@@ -92,7 +92,7 @@ func (cc *ClusterController) delFromClusterSet(obj interface{}) {
// addToClusterSet insert the new cluster to clusterSet and create a corresponding // addToClusterSet insert the new cluster to clusterSet and create a corresponding
// restclient to map clusterKubeClientMap // restclient to map clusterKubeClientMap
func (cc *ClusterController) addToClusterSet(obj interface{}) { func (cc *ClusterController) addToClusterSet(obj interface{}) {
cluster := obj.(*federation_v1beta1.Cluster) cluster := obj.(*federationv1beta1.Cluster)
cc.knownClusterSet.Insert(cluster.Name) cc.knownClusterSet.Insert(cluster.Name)
// create the restclient of cluster // create the restclient of cluster
restClient, err := NewClusterClientSet(cluster) restClient, err := NewClusterClientSet(cluster)
@@ -115,7 +115,7 @@ func (cc *ClusterController) Run() {
}, cc.clusterMonitorPeriod, wait.NeverStop) }, cc.clusterMonitorPeriod, wait.NeverStop)
} }
func (cc *ClusterController) GetClusterStatus(cluster *federation_v1beta1.Cluster) (*federation_v1beta1.ClusterStatus, error) { func (cc *ClusterController) GetClusterStatus(cluster *federationv1beta1.Cluster) (*federationv1beta1.ClusterStatus, error) {
// just get the status of cluster, by requesting the restapi "/healthz" // just get the status of cluster, by requesting the restapi "/healthz"
clusterClient, found := cc.clusterKubeClientMap[cluster.Name] clusterClient, found := cc.clusterKubeClientMap[cluster.Name]
if !found { if !found {

View File

@@ -23,9 +23,9 @@ import (
"net/http/httptest" "net/http/httptest"
"testing" "testing"
federation_v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
controller_util "k8s.io/kubernetes/federation/pkg/federation-controller/util" controllerutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
@@ -35,15 +35,15 @@ import (
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
) )
func newCluster(clusterName string, serverUrl string) *federation_v1beta1.Cluster { func newCluster(clusterName string, serverUrl string) *federationv1beta1.Cluster {
cluster := federation_v1beta1.Cluster{ cluster := federationv1beta1.Cluster{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()}, TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: clusterName, Name: clusterName,
}, },
Spec: federation_v1beta1.ClusterSpec{ Spec: federationv1beta1.ClusterSpec{
ServerAddressByClientCIDRs: []federation_v1beta1.ServerAddressByClientCIDR{ ServerAddressByClientCIDRs: []federationv1beta1.ServerAddressByClientCIDR{
{ {
ClientCIDR: "0.0.0.0/0", ClientCIDR: "0.0.0.0/0",
ServerAddress: serverUrl, ServerAddress: serverUrl,
@@ -54,13 +54,13 @@ func newCluster(clusterName string, serverUrl string) *federation_v1beta1.Cluste
return &cluster return &cluster
} }
func newClusterList(cluster *federation_v1beta1.Cluster) *federation_v1beta1.ClusterList { func newClusterList(cluster *federationv1beta1.Cluster) *federationv1beta1.ClusterList {
clusterList := federation_v1beta1.ClusterList{ clusterList := federationv1beta1.ClusterList{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()}, TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Federation.GroupVersion().String()},
ListMeta: unversioned.ListMeta{ ListMeta: unversioned.ListMeta{
SelfLink: "foobar", SelfLink: "foobar",
}, },
Items: []federation_v1beta1.Cluster{}, Items: []federationv1beta1.Cluster{},
} }
clusterList.Items = append(clusterList.Items, *cluster) clusterList.Items = append(clusterList.Items, *cluster)
return &clusterList return &clusterList
@@ -68,7 +68,7 @@ func newClusterList(cluster *federation_v1beta1.Cluster) *federation_v1beta1.Clu
// init a fake http handler, simulate a federation apiserver, response the "DELETE" "PUT" "GET" "UPDATE" // init a fake http handler, simulate a federation apiserver, response the "DELETE" "PUT" "GET" "UPDATE"
// when "canBeGotten" is false, means that user can not get the cluster cluster from apiserver // when "canBeGotten" is false, means that user can not get the cluster cluster from apiserver
func createHttptestFakeHandlerForFederation(clusterList *federation_v1beta1.ClusterList, canBeGotten bool) *http.HandlerFunc { func createHttptestFakeHandlerForFederation(clusterList *federationv1beta1.ClusterList, canBeGotten bool) *http.HandlerFunc {
fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fakeHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
clusterListString, _ := json.Marshal(*clusterList) clusterListString, _ := json.Marshal(*clusterList)
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
@@ -125,8 +125,8 @@ func TestUpdateClusterStatusOK(t *testing.T) {
federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller")) federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller"))
// Override KubeconfigGetterForCluster to avoid having to setup service accounts and mount files with secret tokens. // Override KubeconfigGetterForCluster to avoid having to setup service accounts and mount files with secret tokens.
originalGetter := controller_util.KubeconfigGetterForCluster originalGetter := controllerutil.KubeconfigGetterForCluster
controller_util.KubeconfigGetterForCluster = func(c *federation_v1beta1.Cluster) clientcmd.KubeconfigGetter { controllerutil.KubeconfigGetterForCluster = func(c *federationv1beta1.Cluster) clientcmd.KubeconfigGetter {
return func() (*clientcmdapi.Config, error) { return func() (*clientcmdapi.Config, error) {
return &clientcmdapi.Config{}, nil return &clientcmdapi.Config{}, nil
} }
@@ -141,11 +141,11 @@ func TestUpdateClusterStatusOK(t *testing.T) {
if !found { if !found {
t.Errorf("Failed to Update Cluster Status") t.Errorf("Failed to Update Cluster Status")
} else { } else {
if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federation_v1beta1.ClusterOffline) { if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federationv1beta1.ClusterOffline) {
t.Errorf("Failed to Update Cluster Status") t.Errorf("Failed to Update Cluster Status")
} }
} }
// Reset KubeconfigGetterForCluster // Reset KubeconfigGetterForCluster
controller_util.KubeconfigGetterForCluster = originalGetter controllerutil.KubeconfigGetterForCluster = originalGetter
} }

View File

@@ -19,17 +19,17 @@ package configmap
import ( import (
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink" "k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -79,7 +79,7 @@ type ConfigMapController struct {
func NewConfigMapController(client federationclientset.Interface) *ConfigMapController { func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client)) broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-configmaps-controller"}) recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-configmaps-controller"})
configmapcontroller := &ConfigMapController{ configmapcontroller := &ConfigMapController{
federatedApiClient: client, federatedApiClient: client,
@@ -98,43 +98,43 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont
// Start informer on federated API servers on configmaps that should be federated. // Start informer on federated API servers on configmaps that should be federated.
configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer( configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Core().ConfigMaps(api_v1.NamespaceAll).List(options) return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().ConfigMaps(api_v1.NamespaceAll).Watch(options) return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
}, },
}, },
&api_v1.ConfigMap{}, &apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) })) util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))
// Federated informer on configmaps in members of federation. // Federated informer on configmaps in members of federation.
configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer( configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer(
client, client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) { func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Core().ConfigMaps(api_v1.NamespaceAll).List(options) return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().ConfigMaps(api_v1.NamespaceAll).Watch(options) return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
}, },
}, },
&api_v1.ConfigMap{}, &apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some configmap opration succeeded. // would be just confirmation that some configmap opration succeeded.
util.NewTriggerOnAllChanges( util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false) configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false)
}, },
)) ))
}, },
&util.ClusterLifecycleHandlerFuncs{ &util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) { ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the configmaps again. // When new cluster becomes available process all the configmaps again.
configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay)) configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
}, },
@@ -143,19 +143,19 @@ func NewConfigMapController(client federationclientset.Interface) *ConfigMapCont
// Federated updater along with Create/Update/Delete operations. // Federated updater along with Create/Update/Delete operations.
configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer, configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*api_v1.ConfigMap) configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap) _, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap)
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*api_v1.ConfigMap) configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap) _, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap)
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*api_v1.ConfigMap) configmap := obj.(*apiv1.ConfigMap)
err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &api_v1.DeleteOptions{}) err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
return err return err
}) })
return configmapcontroller return configmapcontroller
@@ -179,7 +179,7 @@ func (configmapcontroller *ConfigMapController) Run(stopChan <-chan struct{}) {
} }
func (configmapcontroller *ConfigMapController) deliverConfigMapObj(obj interface{}, delay time.Duration, failed bool) { func (configmapcontroller *ConfigMapController) deliverConfigMapObj(obj interface{}, delay time.Duration, failed bool) {
configmap := obj.(*api_v1.ConfigMap) configmap := obj.(*apiv1.ConfigMap)
configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name}, delay, failed) configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name}, delay, failed)
} }
@@ -220,7 +220,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMapsOnClusterChan
configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay)) configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
} }
for _, obj := range configmapcontroller.configmapInformerStore.List() { for _, obj := range configmapcontroller.configmapInformerStore.List() {
configmap := obj.(*api_v1.ConfigMap) configmap := obj.(*apiv1.ConfigMap)
configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name}, configmapcontroller.deliverConfigMap(types.NamespacedName{Namespace: configmap.Namespace, Name: configmap.Name},
configmapcontroller.smallDelay, false) configmapcontroller.smallDelay, false)
} }
@@ -247,7 +247,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap typ
glog.V(8).Infof("Skipping not federated config map: %s", key) glog.V(8).Infof("Skipping not federated config map: %s", key)
return return
} }
baseConfigMap := baseConfigMapObj.(*api_v1.ConfigMap) baseConfigMap := baseConfigMapObj.(*apiv1.ConfigMap)
clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters() clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters()
if err != nil { if err != nil {
@@ -266,7 +266,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap typ
} }
// Do not modify data. // Do not modify data.
desiredConfigMap := &api_v1.ConfigMap{ desiredConfigMap := &apiv1.ConfigMap{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta), ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta),
Data: baseConfigMap.Data, Data: baseConfigMap.Data,
} }
@@ -281,7 +281,7 @@ func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap typ
ClusterName: cluster.Name, ClusterName: cluster.Name,
}) })
} else { } else {
clusterConfigMap := clusterConfigMapObj.(*api_v1.ConfigMap) clusterConfigMap := clusterConfigMapObj.(*apiv1.ConfigMap)
// Update existing configmap, if needed. // Update existing configmap, if needed.
if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) { if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) {

View File

@@ -21,13 +21,13 @@ import (
"testing" "testing"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -36,29 +36,29 @@ import (
) )
func TestConfigMapController(t *testing.T) { func TestConfigMapController(t *testing.T) {
cluster1 := NewCluster("cluster1", api_v1.ConditionTrue) cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", api_v1.ConditionTrue) cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fake_fedclientset.Clientset{} fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}}) RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("configmaps", &fakeClient.Fake, &api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}}) RegisterFakeList("configmaps", &fakeClient.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
configmapWatch := RegisterFakeWatch("configmaps", &fakeClient.Fake) configmapWatch := RegisterFakeWatch("configmaps", &fakeClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake) clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fake_kubeclientset.Clientset{} cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("configmaps", &cluster1Client.Fake) cluster1Watch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
RegisterFakeList("configmaps", &cluster1Client.Fake, &api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}}) RegisterFakeList("configmaps", &cluster1Client.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster1Client.Fake, cluster1Watch) cluster1CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster1Client.Fake, cluster1Watch)
cluster1UpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster1Client.Fake, cluster1Watch) cluster1UpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fake_kubeclientset.Clientset{} cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("configmaps", &cluster2Client.Fake) cluster2Watch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
RegisterFakeList("configmaps", &cluster2Client.Fake, &api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}}) RegisterFakeList("configmaps", &cluster2Client.Fake, &apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster2Client.Fake, cluster2Watch) cluster2CreateChan := RegisterFakeCopyOnCreate("configmaps", &cluster2Client.Fake, cluster2Watch)
configmapController := NewConfigMapController(fakeClient) configmapController := NewConfigMapController(fakeClient)
informer := ToFederatedInformerForTestOnly(configmapController.configmapFederatedInformer) informer := ToFederatedInformerForTestOnly(configmapController.configmapFederatedInformer)
informer.SetClientFactory(func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) { informer.SetClientFactory(func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name { switch cluster.Name {
case cluster1.Name: case cluster1.Name:
return cluster1Client, nil return cluster1Client, nil
@@ -77,8 +77,8 @@ func TestConfigMapController(t *testing.T) {
stop := make(chan struct{}) stop := make(chan struct{})
configmapController.Run(stop) configmapController.Run(stop)
configmap1 := &api_v1.ConfigMap{ configmap1 := &apiv1.ConfigMap{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-configmap", Name: "test-configmap",
Namespace: "ns", Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/configmaps/test-configmap", SelfLink: "/api/v1/namespaces/ns/configmaps/test-configmap",
@@ -136,7 +136,7 @@ func TestConfigMapController(t *testing.T) {
close(stop) close(stop)
} }
func GetConfigMapFromChan(c chan runtime.Object) *api_v1.ConfigMap { func GetConfigMapFromChan(c chan runtime.Object) *apiv1.ConfigMap {
configmap := GetObjectFromChan(c).(*api_v1.ConfigMap) configmap := GetObjectFromChan(c).(*apiv1.ConfigMap)
return configmap return configmap
} }

View File

@@ -21,21 +21,21 @@ import (
"reflect" "reflect"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink" "k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -87,7 +87,7 @@ type DaemonSetController struct {
func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController { func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client)) broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-daemonset-controller"}) recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-daemonset-controller"})
daemonsetcontroller := &DaemonSetController{ daemonsetcontroller := &DaemonSetController{
federatedApiClient: client, federatedApiClient: client,
@@ -106,28 +106,28 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
// Start informer in federated API servers on daemonsets that should be federated. // Start informer in federated API servers on daemonsets that should be federated.
daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer( daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Extensions().DaemonSets(api_v1.NamespaceAll).List(options) return client.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options) return client.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
}, },
}, },
&extensionsv1.DaemonSet{}, &extensionsv1.DaemonSet{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) })) util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))
// Federated informer on daemonsets in members of federation. // Federated informer on daemonsets in members of federation.
daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer( daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer(
client, client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) { func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).List(options) return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options) return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
}, },
}, },
&extensionsv1.DaemonSet{}, &extensionsv1.DaemonSet{},
@@ -135,14 +135,14 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some daemonset opration succeeded. // would be just confirmation that some daemonset opration succeeded.
util.NewTriggerOnAllChanges( util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false) daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false)
}, },
)) ))
}, },
&util.ClusterLifecycleHandlerFuncs{ &util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) { ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the daemonsets again. // When new cluster becomes available process all the daemonsets again.
daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay)) daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
}, },
@@ -151,7 +151,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
// Federated updater along with Create/Update/Delete operations. // Federated updater along with Create/Update/Delete operations.
daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer, daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet) daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name) glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset) _, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset)
@@ -162,7 +162,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
} }
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet) daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name) glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset) _, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
@@ -173,10 +173,10 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
} }
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet) daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name) glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &api_v1.DeleteOptions{}) err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &apiv1.DeleteOptions{})
if err != nil { if err != nil {
glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err) glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else { } else {
@@ -190,7 +190,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
daemonsetcontroller.removeFinalizerFunc, daemonsetcontroller.removeFinalizerFunc,
daemonsetcontroller.addFinalizerFunc, daemonsetcontroller.addFinalizerFunc,
// objNameFunc // objNameFunc
func(obj pkg_runtime.Object) string { func(obj pkgruntime.Object) string {
daemonset := obj.(*extensionsv1.DaemonSet) daemonset := obj.(*extensionsv1.DaemonSet)
return daemonset.Name return daemonset.Name
}, },
@@ -204,7 +204,7 @@ func NewDaemonSetController(client federationclientset.Interface) *DaemonSetCont
} }
// Returns true if the given object has the given finalizer in its ObjectMeta. // Returns true if the given object has the given finalizer in its ObjectMeta.
func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer string) bool { func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
daemonset := obj.(*extensionsv1.DaemonSet) daemonset := obj.(*extensionsv1.DaemonSet)
for i := range daemonset.ObjectMeta.Finalizers { for i := range daemonset.ObjectMeta.Finalizers {
if string(daemonset.ObjectMeta.Finalizers[i]) == finalizer { if string(daemonset.ObjectMeta.Finalizers[i]) == finalizer {
@@ -216,7 +216,7 @@ func (daemonsetcontroller *DaemonSetController) hasFinalizerFunc(obj pkg_runtime
// Removes the finalizer from the given objects ObjectMeta. // Removes the finalizer from the given objects ObjectMeta.
// Assumes that the given object is a daemonset. // Assumes that the given object is a daemonset.
func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) { func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
daemonset := obj.(*extensionsv1.DaemonSet) daemonset := obj.(*extensionsv1.DaemonSet)
newFinalizers := []string{} newFinalizers := []string{}
hasFinalizer := false hasFinalizer := false
@@ -241,7 +241,7 @@ func (daemonsetcontroller *DaemonSetController) removeFinalizerFunc(obj pkg_runt
// Adds the given finalizer to the given objects ObjectMeta. // Adds the given finalizer to the given objects ObjectMeta.
// Assumes that the given object is a daemonset. // Assumes that the given object is a daemonset.
func (daemonsetcontroller *DaemonSetController) addFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) { func (daemonsetcontroller *DaemonSetController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
daemonset := obj.(*extensionsv1.DaemonSet) daemonset := obj.(*extensionsv1.DaemonSet)
daemonset.ObjectMeta.Finalizers = append(daemonset.ObjectMeta.Finalizers, finalizer) daemonset.ObjectMeta.Finalizers = append(daemonset.ObjectMeta.Finalizers, finalizer)
daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset) daemonset, err := daemonsetcontroller.federatedApiClient.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)

View File

@@ -22,16 +22,16 @@ import (
"testing" "testing"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
//"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" //"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -39,30 +39,30 @@ import (
) )
func TestDaemonSetController(t *testing.T) { func TestDaemonSetController(t *testing.T) {
cluster1 := NewCluster("cluster1", api_v1.ConditionTrue) cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", api_v1.ConditionTrue) cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fake_fedclientset.Clientset{} fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}}) RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("daemonsets", &fakeClient.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}}) RegisterFakeList("daemonsets", &fakeClient.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
daemonsetWatch := RegisterFakeWatch("daemonsets", &fakeClient.Fake) daemonsetWatch := RegisterFakeWatch("daemonsets", &fakeClient.Fake)
// daemonsetUpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &fakeClient.Fake, daemonsetWatch) // daemonsetUpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &fakeClient.Fake, daemonsetWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake) clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fake_kubeclientset.Clientset{} cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("daemonsets", &cluster1Client.Fake) cluster1Watch := RegisterFakeWatch("daemonsets", &cluster1Client.Fake)
RegisterFakeList("daemonsets", &cluster1Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}}) RegisterFakeList("daemonsets", &cluster1Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster1Client.Fake, cluster1Watch) cluster1CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &cluster1Client.Fake, cluster1Watch) // cluster1UpdateChan := RegisterFakeCopyOnUpdate("daemonsets", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fake_kubeclientset.Clientset{} cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("daemonsets", &cluster2Client.Fake) cluster2Watch := RegisterFakeWatch("daemonsets", &cluster2Client.Fake)
RegisterFakeList("daemonsets", &cluster2Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}}) RegisterFakeList("daemonsets", &cluster2Client.Fake, &extensionsv1.DaemonSetList{Items: []extensionsv1.DaemonSet{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster2Client.Fake, cluster2Watch) cluster2CreateChan := RegisterFakeCopyOnCreate("daemonsets", &cluster2Client.Fake, cluster2Watch)
daemonsetController := NewDaemonSetController(fakeClient) daemonsetController := NewDaemonSetController(fakeClient)
informer := ToFederatedInformerForTestOnly(daemonsetController.daemonsetFederatedInformer) informer := ToFederatedInformerForTestOnly(daemonsetController.daemonsetFederatedInformer)
informer.SetClientFactory(func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) { informer.SetClientFactory(func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name { switch cluster.Name {
case cluster1.Name: case cluster1.Name:
return cluster1Client, nil return cluster1Client, nil
@@ -82,7 +82,7 @@ func TestDaemonSetController(t *testing.T) {
daemonsetController.Run(stop) daemonsetController.Run(stop)
daemonset1 := extensionsv1.DaemonSet{ daemonset1 := extensionsv1.DaemonSet{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-daemonset", Name: "test-daemonset",
Namespace: "ns", Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/daemonsets/test-daemonset", SelfLink: "/api/v1/namespaces/ns/daemonsets/test-daemonset",
@@ -102,7 +102,7 @@ func TestDaemonSetController(t *testing.T) {
updatedDaemonSet := GetDaemonSetFromChan(daemonsetUpdateChan) updatedDaemonSet := GetDaemonSetFromChan(daemonsetUpdateChan)
assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, deletionhelper.FinalizerDeleteFromUnderlyingClusters)) assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedDaemonSet = GetDaemonSetFromChan(daemonsetUpdateChan) updatedDaemonSet = GetDaemonSetFromChan(daemonsetUpdateChan)
assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, api_v1.FinalizerOrphan)) assert.True(t, daemonsetController.hasFinalizerFunc(updatedDaemonSet, apiv1.FinalizerOrphan))
daemonset1 = *updatedDaemonSet daemonset1 = *updatedDaemonSet
*/ */
createdDaemonSet := GetDaemonSetFromChan(cluster1CreateChan) createdDaemonSet := GetDaemonSetFromChan(cluster1CreateChan)

View File

@@ -23,13 +23,13 @@ import (
"time" "time"
fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" fedv1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
apiv1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -83,19 +83,19 @@ func TestDeploymentController(t *testing.T) {
cluster1 := NewCluster("cluster1", apiv1.ConditionTrue) cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", apiv1.ConditionTrue) cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fake_fedclientset.Clientset{} fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &fedv1.ClusterList{Items: []fedv1.Cluster{*cluster1}}) RegisterFakeList("clusters", &fakeClient.Fake, &fedv1.ClusterList{Items: []fedv1.Cluster{*cluster1}})
deploymentsWatch := RegisterFakeWatch("deployments", &fakeClient.Fake) deploymentsWatch := RegisterFakeWatch("deployments", &fakeClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake) clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fake_kubeclientset.Clientset{} cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("deployments", &cluster1Client.Fake) cluster1Watch := RegisterFakeWatch("deployments", &cluster1Client.Fake)
_ = RegisterFakeWatch("pods", &cluster1Client.Fake) _ = RegisterFakeWatch("pods", &cluster1Client.Fake)
RegisterFakeList("deployments", &cluster1Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}}) RegisterFakeList("deployments", &cluster1Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("deployments", &cluster1Client.Fake, cluster1Watch) cluster1CreateChan := RegisterFakeCopyOnCreate("deployments", &cluster1Client.Fake, cluster1Watch)
cluster1UpdateChan := RegisterFakeCopyOnUpdate("deployments", &cluster1Client.Fake, cluster1Watch) cluster1UpdateChan := RegisterFakeCopyOnUpdate("deployments", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fake_kubeclientset.Clientset{} cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("deployments", &cluster2Client.Fake) cluster2Watch := RegisterFakeWatch("deployments", &cluster2Client.Fake)
_ = RegisterFakeWatch("pods", &cluster2Client.Fake) _ = RegisterFakeWatch("pods", &cluster2Client.Fake)
RegisterFakeList("deployments", &cluster2Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}}) RegisterFakeList("deployments", &cluster2Client.Fake, &extensionsv1.DeploymentList{Items: []extensionsv1.Deployment{}})

View File

@@ -21,7 +21,7 @@ import (
"sync" "sync"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
@@ -29,13 +29,13 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
extensions_v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -134,17 +134,17 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Start informer in federated API servers on ingresses that should be federated. // Start informer in federated API servers on ingresses that should be federated.
ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer( ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return client.Extensions().Ingresses(api.NamespaceAll).List(options) return client.Extensions().Ingresses(api.NamespaceAll).List(options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Extensions().Ingresses(api.NamespaceAll).Watch(options) return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
}, },
}, },
&extensions_v1beta1.Ingress{}, &extensionsv1beta1.Ingress{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges( util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
ic.deliverIngressObj(obj, 0, false) ic.deliverIngressObj(obj, 0, false)
}, },
)) ))
@@ -152,29 +152,29 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated informer on ingresses in members of federation. // Federated informer on ingresses in members of federation.
ic.ingressFederatedInformer = util.NewFederatedInformer( ic.ingressFederatedInformer = util.NewFederatedInformer(
client, client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) { func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options) return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options) return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
}, },
}, },
&extensions_v1beta1.Ingress{}, &extensionsv1beta1.Ingress{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some ingress operation succeeded. // would be just confirmation that some ingress operation succeeded.
util.NewTriggerOnAllChanges( util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
ic.deliverIngressObj(obj, ic.ingressReviewDelay, false) ic.deliverIngressObj(obj, ic.ingressReviewDelay, false)
}, },
)) ))
}, },
&util.ClusterLifecycleHandlerFuncs{ &util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) { ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the ingresses again, and configure it's ingress controller's configmap with the correct UID // When new cluster becomes available process all the ingresses again, and configure it's ingress controller's configmap with the correct UID
ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay) ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
}, },
@@ -184,11 +184,11 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated informer on configmaps for ingress controllers in members of the federation. // Federated informer on configmaps for ingress controllers in members of the federation.
ic.configMapFederatedInformer = util.NewFederatedInformer( ic.configMapFederatedInformer = util.NewFederatedInformer(
client, client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) { func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name) glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
if targetClient == nil { if targetClient == nil {
glog.Errorf("Internal error: targetClient is nil") glog.Errorf("Internal error: targetClient is nil")
} }
@@ -206,14 +206,14 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Trigger reconcilation whenever the ingress controller's configmap in a federated cluster is changed. In most cases it // Trigger reconcilation whenever the ingress controller's configmap in a federated cluster is changed. In most cases it
// would be just confirmation that the configmap for the ingress controller is correct. // would be just confirmation that the configmap for the ingress controller is correct.
util.NewTriggerOnAllChanges( util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
ic.deliverConfigMapObj(cluster.Name, obj, ic.configMapReviewDelay, false) ic.deliverConfigMapObj(cluster.Name, obj, ic.configMapReviewDelay, false)
}, },
)) ))
}, },
&util.ClusterLifecycleHandlerFuncs{ &util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) { ClusterAvailable: func(cluster *federationapi.Cluster) {
ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay) ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
}, },
}, },
@@ -221,8 +221,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated ingress updater along with Create/Update/Delete operations. // Federated ingress updater along with Create/Update/Delete operations.
ic.federatedIngressUpdater = util.NewFederatedUpdater(ic.ingressFederatedInformer, ic.federatedIngressUpdater = util.NewFederatedUpdater(ic.ingressFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Attempting to create Ingress: %v", ingress) glog.V(4).Infof("Attempting to create Ingress: %v", ingress)
_, err := client.Extensions().Ingresses(ingress.Namespace).Create(ingress) _, err := client.Extensions().Ingresses(ingress.Namespace).Create(ingress)
if err != nil { if err != nil {
@@ -232,8 +232,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
} }
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Attempting to update Ingress: %v", ingress) glog.V(4).Infof("Attempting to update Ingress: %v", ingress)
_, err := client.Extensions().Ingresses(ingress.Namespace).Update(ingress) _, err := client.Extensions().Ingresses(ingress.Namespace).Update(ingress)
if err != nil { if err != nil {
@@ -243,8 +243,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
} }
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Attempting to delete Ingress: %v", ingress) glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &v1.DeleteOptions{}) err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &v1.DeleteOptions{})
return err return err
@@ -252,14 +252,14 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
// Federated configmap updater along with Create/Update/Delete operations. Only Update should ever be called. // Federated configmap updater along with Create/Update/Delete operations. Only Update should ever be called.
ic.federatedConfigMapUpdater = util.NewFederatedUpdater(ic.configMapFederatedInformer, ic.federatedConfigMapUpdater = util.NewFederatedUpdater(ic.configMapFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configMap := obj.(*v1.ConfigMap) configMap := obj.(*v1.ConfigMap)
configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace} configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
glog.Errorf("Internal error: Incorrectly attempting to create ConfigMap: %q", configMapName) glog.Errorf("Internal error: Incorrectly attempting to create ConfigMap: %q", configMapName)
_, err := client.Core().ConfigMaps(configMap.Namespace).Create(configMap) _, err := client.Core().ConfigMaps(configMap.Namespace).Create(configMap)
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configMap := obj.(*v1.ConfigMap) configMap := obj.(*v1.ConfigMap)
configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace} configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
glog.V(4).Infof("Attempting to update ConfigMap: %v", configMap) glog.V(4).Infof("Attempting to update ConfigMap: %v", configMap)
@@ -271,7 +271,7 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
} }
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configMap := obj.(*v1.ConfigMap) configMap := obj.(*v1.ConfigMap)
configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace} configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
glog.Errorf("Internal error: Incorrectly attempting to delete ConfigMap: %q", configMapName) glog.Errorf("Internal error: Incorrectly attempting to delete ConfigMap: %q", configMapName)
@@ -284,8 +284,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
ic.removeFinalizerFunc, ic.removeFinalizerFunc,
ic.addFinalizerFunc, ic.addFinalizerFunc,
// objNameFunc // objNameFunc
func(obj pkg_runtime.Object) string { func(obj pkgruntime.Object) string {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
return ingress.Name return ingress.Name
}, },
ic.updateTimeout, ic.updateTimeout,
@@ -297,8 +297,8 @@ func NewIngressController(client federationclientset.Interface) *IngressControll
} }
// Returns true if the given object has the given finalizer in its ObjectMeta. // Returns true if the given object has the given finalizer in its ObjectMeta.
func (ic *IngressController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer string) bool { func (ic *IngressController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
for i := range ingress.ObjectMeta.Finalizers { for i := range ingress.ObjectMeta.Finalizers {
if string(ingress.ObjectMeta.Finalizers[i]) == finalizer { if string(ingress.ObjectMeta.Finalizers[i]) == finalizer {
return true return true
@@ -309,8 +309,8 @@ func (ic *IngressController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer
// Removes the finalizer from the given objects ObjectMeta. // Removes the finalizer from the given objects ObjectMeta.
// Assumes that the given object is a ingress. // Assumes that the given object is a ingress.
func (ic *IngressController) removeFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) { func (ic *IngressController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
newFinalizers := []string{} newFinalizers := []string{}
hasFinalizer := false hasFinalizer := false
for i := range ingress.ObjectMeta.Finalizers { for i := range ingress.ObjectMeta.Finalizers {
@@ -334,8 +334,8 @@ func (ic *IngressController) removeFinalizerFunc(obj pkg_runtime.Object, finaliz
// Adds the given finalizer to the given objects ObjectMeta. // Adds the given finalizer to the given objects ObjectMeta.
// Assumes that the given object is a ingress. // Assumes that the given object is a ingress.
func (ic *IngressController) addFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) { func (ic *IngressController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
ingress.ObjectMeta.Finalizers = append(ingress.ObjectMeta.Finalizers, finalizer) ingress.ObjectMeta.Finalizers = append(ingress.ObjectMeta.Finalizers, finalizer)
ingress, err := ic.federatedApiClient.Extensions().Ingresses(ingress.Namespace).Update(ingress) ingress, err := ic.federatedApiClient.Extensions().Ingresses(ingress.Namespace).Update(ingress)
if err != nil { if err != nil {
@@ -394,7 +394,7 @@ func (ic *IngressController) Run(stopChan <-chan struct{}) {
} }
func (ic *IngressController) deliverIngressObj(obj interface{}, delay time.Duration, failed bool) { func (ic *IngressController) deliverIngressObj(obj interface{}, delay time.Duration, failed bool) {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
ic.deliverIngress(types.NamespacedName{Namespace: ingress.Namespace, Name: ingress.Name}, delay, failed) ic.deliverIngress(types.NamespacedName{Namespace: ingress.Namespace, Name: ingress.Name}, delay, failed)
} }
@@ -474,7 +474,7 @@ func (ic *IngressController) reconcileIngressesOnClusterChange(clusterName strin
} }
for _, obj := range ingressList { for _, obj := range ingressList {
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
nsName := types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace} nsName := types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace}
glog.V(4).Infof("Delivering federated ingress %q for cluster %q", nsName, clusterName) glog.V(4).Infof("Delivering federated ingress %q for cluster %q", nsName, clusterName)
ic.deliverIngress(nsName, ic.smallDelay, false) ic.deliverIngress(nsName, ic.smallDelay, false)
@@ -543,7 +543,7 @@ func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
In cases 2 and 3, the configmaps will be updated in the next cycle, triggered by the federation cluster update(s) In cases 2 and 3, the configmaps will be updated in the next cycle, triggered by the federation cluster update(s)
*/ */
func (ic *IngressController) reconcileConfigMap(cluster *federation_api.Cluster, configMap *v1.ConfigMap) { func (ic *IngressController) reconcileConfigMap(cluster *federationapi.Cluster, configMap *v1.ConfigMap) {
ic.Lock() // TODO: Reduce the scope of this master election lock. ic.Lock() // TODO: Reduce the scope of this master election lock.
defer ic.Unlock() defer ic.Unlock()
@@ -586,7 +586,7 @@ func (ic *IngressController) reconcileConfigMap(cluster *federation_api.Cluster,
If there is no elected master cluster, an error is returned. If there is no elected master cluster, an error is returned.
All other clusters must use the ingress UID of the elected master. All other clusters must use the ingress UID of the elected master.
*/ */
func (ic *IngressController) getMasterCluster() (master *federation_api.Cluster, ingressUID string, err error) { func (ic *IngressController) getMasterCluster() (master *federationapi.Cluster, ingressUID string, err error) {
clusters, err := ic.configMapFederatedInformer.GetReadyClusters() clusters, err := ic.configMapFederatedInformer.GetReadyClusters()
if err != nil { if err != nil {
glog.Errorf("Failed to get cluster list: %v", err) glog.Errorf("Failed to get cluster list: %v", err)
@@ -607,10 +607,10 @@ func (ic *IngressController) getMasterCluster() (master *federation_api.Cluster,
updateClusterIngressUIDToMasters takes the ingress UID annotation on the master cluster and applies it to cluster. updateClusterIngressUIDToMasters takes the ingress UID annotation on the master cluster and applies it to cluster.
If there is no master cluster, then fallbackUID is used (and hence this cluster becomes the master). If there is no master cluster, then fallbackUID is used (and hence this cluster becomes the master).
*/ */
func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federation_api.Cluster, fallbackUID string) { func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federationapi.Cluster, fallbackUID string) {
masterCluster, masterUID, err := ic.getMasterCluster() masterCluster, masterUID, err := ic.getMasterCluster()
clusterObj, clusterErr := conversion.NewCloner().DeepCopy(cluster) // Make a clone so that we don't clobber our input param clusterObj, clusterErr := conversion.NewCloner().DeepCopy(cluster) // Make a clone so that we don't clobber our input param
cluster, ok := clusterObj.(*federation_api.Cluster) cluster, ok := clusterObj.(*federationapi.Cluster)
if clusterErr != nil || !ok { if clusterErr != nil || !ok {
glog.Errorf("Internal error: Failed clone cluster resource while attempting to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err) glog.Errorf("Internal error: Failed clone cluster resource while attempting to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err)
return return
@@ -655,7 +655,7 @@ func (ic *IngressController) isClusterReady(clusterName string) bool {
// updateAnnotationOnIngress updates the annotation with the given key on the given federated ingress. // updateAnnotationOnIngress updates the annotation with the given key on the given federated ingress.
// Queues the ingress for resync when done. // Queues the ingress for resync when done.
func (ic *IngressController) updateAnnotationOnIngress(ingress *extensions_v1beta1.Ingress, key, value string) { func (ic *IngressController) updateAnnotationOnIngress(ingress *extensionsv1beta1.Ingress, key, value string) {
if ingress.ObjectMeta.Annotations == nil { if ingress.ObjectMeta.Annotations == nil {
ingress.ObjectMeta.Annotations = make(map[string]string) ingress.ObjectMeta.Annotations = make(map[string]string)
} }
@@ -693,9 +693,9 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
return return
} }
baseIngressObj, err := conversion.NewCloner().DeepCopy(baseIngressObjFromStore) baseIngressObj, err := conversion.NewCloner().DeepCopy(baseIngressObjFromStore)
baseIngress, ok := baseIngressObj.(*extensions_v1beta1.Ingress) baseIngress, ok := baseIngressObj.(*extensionsv1beta1.Ingress)
if err != nil || !ok { if err != nil || !ok {
glog.Errorf("Internal Error %v : Object retrieved from ingressInformerStore with key %q is not of correct type *extensions_v1beta1.Ingress: %v", err, key, baseIngressObj) glog.Errorf("Internal Error %v : Object retrieved from ingressInformerStore with key %q is not of correct type *extensionsv1beta1.Ingress: %v", err, key, baseIngressObj)
} else { } else {
glog.V(4).Infof("Base (federated) ingress: %v", baseIngress) glog.V(4).Infof("Base (federated) ingress: %v", baseIngress)
} }
@@ -720,7 +720,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
ic.deliverIngress(ingress, 0, true) ic.deliverIngress(ingress, 0, true)
return return
} }
baseIngress = updatedIngressObj.(*extensions_v1beta1.Ingress) baseIngress = updatedIngressObj.(*extensionsv1beta1.Ingress)
glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name) glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name)
@@ -744,7 +744,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
ic.deliverIngress(ingress, 0, true) ic.deliverIngress(ingress, 0, true)
return return
} }
desiredIngress := &extensions_v1beta1.Ingress{} desiredIngress := &extensionsv1beta1.Ingress{}
objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta) objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta)
if err != nil { if err != nil {
glog.Errorf("Error deep copying ObjectMeta: %v", err) glog.Errorf("Error deep copying ObjectMeta: %v", err)
@@ -757,9 +757,9 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
if !ok { if !ok {
glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta) glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
} }
desiredIngress.Spec = objSpec.(extensions_v1beta1.IngressSpec) desiredIngress.Spec = objSpec.(extensionsv1beta1.IngressSpec)
if !ok { if !ok {
glog.Errorf("Internal error: Failed to cast to extensions_v1beta1.Ingressespec: %v", objSpec) glog.Errorf("Internal error: Failed to cast to extensionsv1beta1.Ingressespec: %v", objSpec)
} }
glog.V(4).Infof("Desired Ingress: %v", desiredIngress) glog.V(4).Infof("Desired Ingress: %v", desiredIngress)
@@ -799,7 +799,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName) glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName)
} }
} else { } else {
clusterIngress := clusterIngressObj.(*extensions_v1beta1.Ingress) clusterIngress := clusterIngressObj.(*extensionsv1beta1.Ingress)
glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name) glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name)
clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly] clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly]
baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0 baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0
@@ -898,7 +898,7 @@ func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
} }
// delete deletes the given ingress or returns error if the deletion was not complete. // delete deletes the given ingress or returns error if the deletion was not complete.
func (ic *IngressController) delete(ingress *extensions_v1beta1.Ingress) error { func (ic *IngressController) delete(ingress *extensionsv1beta1.Ingress) error {
glog.V(3).Infof("Handling deletion of ingress: %v", *ingress) glog.V(3).Infof("Handling deletion of ingress: %v", *ingress)
_, err := ic.deletionHelper.HandleObjectInUnderlyingClusters(ingress) _, err := ic.deletionHelper.HandleObjectInUnderlyingClusters(ingress)
if err != nil { if err != nil {

View File

@@ -22,17 +22,17 @@ import (
"testing" "testing"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensions_v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -41,40 +41,40 @@ import (
) )
func TestIngressController(t *testing.T) { func TestIngressController(t *testing.T) {
fakeClusterList := federation_api.ClusterList{Items: []federation_api.Cluster{}} fakeClusterList := federationapi.ClusterList{Items: []federationapi.Cluster{}}
fakeConfigMapList1 := api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}} fakeConfigMapList1 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
fakeConfigMapList2 := api_v1.ConfigMapList{Items: []api_v1.ConfigMap{}} fakeConfigMapList2 := apiv1.ConfigMapList{Items: []apiv1.ConfigMap{}}
cluster1 := NewCluster("cluster1", api_v1.ConditionTrue) cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", api_v1.ConditionTrue) cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
cfg1 := NewConfigMap("foo") cfg1 := NewConfigMap("foo")
cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled. cfg2 := NewConfigMap("bar") // Different UID from cfg1, so that we can check that they get reconciled.
t.Log("Creating fake infrastructure") t.Log("Creating fake infrastructure")
fedClient := &fake_fedclientset.Clientset{} fedClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fedClient.Fake, &fakeClusterList) RegisterFakeList("clusters", &fedClient.Fake, &fakeClusterList)
RegisterFakeList("ingresses", &fedClient.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}}) RegisterFakeList("ingresses", &fedClient.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
fedIngressWatch := RegisterFakeWatch("ingresses", &fedClient.Fake) fedIngressWatch := RegisterFakeWatch("ingresses", &fedClient.Fake)
clusterWatch := RegisterFakeWatch("clusters", &fedClient.Fake) clusterWatch := RegisterFakeWatch("clusters", &fedClient.Fake)
fedClusterUpdateChan := RegisterFakeCopyOnUpdate("clusters", &fedClient.Fake, clusterWatch) fedClusterUpdateChan := RegisterFakeCopyOnUpdate("clusters", &fedClient.Fake, clusterWatch)
//fedIngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &fedClient.Fake, fedIngressWatch) //fedIngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &fedClient.Fake, fedIngressWatch)
cluster1Client := &fake_kubeclientset.Clientset{} cluster1Client := &fakekubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster1Client.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}}) RegisterFakeList("ingresses", &cluster1Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
RegisterFakeList("configmaps", &cluster1Client.Fake, &fakeConfigMapList1) RegisterFakeList("configmaps", &cluster1Client.Fake, &fakeConfigMapList1)
cluster1IngressWatch := RegisterFakeWatch("ingresses", &cluster1Client.Fake) cluster1IngressWatch := RegisterFakeWatch("ingresses", &cluster1Client.Fake)
cluster1ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster1Client.Fake) cluster1ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster1Client.Fake)
cluster1IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster1Client.Fake, cluster1IngressWatch) cluster1IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
// cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &cluster1Client.Fake, cluster1IngressWatch) // cluster1IngressUpdateChan := RegisterFakeCopyOnUpdate("ingresses", &cluster1Client.Fake, cluster1IngressWatch)
cluster2Client := &fake_kubeclientset.Clientset{} cluster2Client := &fakekubeclientset.Clientset{}
RegisterFakeList("ingresses", &cluster2Client.Fake, &extensions_v1beta1.IngressList{Items: []extensions_v1beta1.Ingress{}}) RegisterFakeList("ingresses", &cluster2Client.Fake, &extensionsv1beta1.IngressList{Items: []extensionsv1beta1.Ingress{}})
RegisterFakeList("configmaps", &cluster2Client.Fake, &fakeConfigMapList2) RegisterFakeList("configmaps", &cluster2Client.Fake, &fakeConfigMapList2)
cluster2IngressWatch := RegisterFakeWatch("ingresses", &cluster2Client.Fake) cluster2IngressWatch := RegisterFakeWatch("ingresses", &cluster2Client.Fake)
cluster2ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster2Client.Fake) cluster2ConfigMapWatch := RegisterFakeWatch("configmaps", &cluster2Client.Fake)
cluster2IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster2Client.Fake, cluster2IngressWatch) cluster2IngressCreateChan := RegisterFakeCopyOnCreate("ingresses", &cluster2Client.Fake, cluster2IngressWatch)
cluster2ConfigMapUpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster2Client.Fake, cluster2ConfigMapWatch) cluster2ConfigMapUpdateChan := RegisterFakeCopyOnUpdate("configmaps", &cluster2Client.Fake, cluster2ConfigMapWatch)
clientFactoryFunc := func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) { clientFactoryFunc := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name { switch cluster.Name {
case cluster1.Name: case cluster1.Name:
return cluster1Client, nil return cluster1Client, nil
@@ -102,8 +102,8 @@ func TestIngressController(t *testing.T) {
// TODO: Here we are creating the ingress with first cluster annotation. // TODO: Here we are creating the ingress with first cluster annotation.
// Add another test without that annotation when // Add another test without that annotation when
// https://github.com/kubernetes/kubernetes/issues/36540 is fixed. // https://github.com/kubernetes/kubernetes/issues/36540 is fixed.
ing1 := extensions_v1beta1.Ingress{ ing1 := extensionsv1beta1.Ingress{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-ingress", Name: "test-ingress",
Namespace: "mynamespace", Namespace: "mynamespace",
SelfLink: "/api/v1/namespaces/mynamespace/ingress/test-ingress", SelfLink: "/api/v1/namespaces/mynamespace/ingress/test-ingress",
@@ -111,9 +111,9 @@ func TestIngressController(t *testing.T) {
firstClusterAnnotation: cluster1.Name, firstClusterAnnotation: cluster1.Name,
}, },
}, },
Status: extensions_v1beta1.IngressStatus{ Status: extensionsv1beta1.IngressStatus{
LoadBalancer: api_v1.LoadBalancerStatus{ LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: make([]api_v1.LoadBalancerIngress, 0, 0), Ingress: make([]apiv1.LoadBalancerIngress, 0, 0),
}, },
}, },
} }
@@ -139,7 +139,7 @@ func TestIngressController(t *testing.T) {
updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan) updatedIngress := GetIngressFromChan(t, fedIngressUpdateChan)
assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, deletionhelper.FinalizerDeleteFromUnderlyingClusters)) assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedIngress = GetIngressFromChan(t, fedIngressUpdateChan) updatedIngress = GetIngressFromChan(t, fedIngressUpdateChan)
assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, api_v1.FinalizerOrphan), fmt.Sprintf("ingress does not have the orphan finalizer: %v", updatedIngress)) assert.True(t, ingressController.hasFinalizerFunc(updatedIngress, apiv1.FinalizerOrphan), fmt.Sprintf("ingress does not have the orphan finalizer: %v", updatedIngress))
ing1 = *updatedIngress ing1 = *updatedIngress
*/ */
t.Log("Checking that Ingress was correctly created in cluster 1") t.Log("Checking that Ingress was correctly created in cluster 1")
@@ -159,7 +159,7 @@ func TestIngressController(t *testing.T) {
// TODO: Re-enable this when we have fixed these flaky tests: https://github.com/kubernetes/kubernetes/issues/36540. // TODO: Re-enable this when we have fixed these flaky tests: https://github.com/kubernetes/kubernetes/issues/36540.
// Test that IP address gets transferred from cluster ingress to federated ingress. // Test that IP address gets transferred from cluster ingress to federated ingress.
t.Log("Checking that IP address gets transferred from cluster ingress to federated ingress") t.Log("Checking that IP address gets transferred from cluster ingress to federated ingress")
createdIngress.Status.LoadBalancer.Ingress = append(createdIngress.Status.LoadBalancer.Ingress, api_v1.LoadBalancerIngress{IP: "1.2.3.4"}) createdIngress.Status.LoadBalancer.Ingress = append(createdIngress.Status.LoadBalancer.Ingress, apiv1.LoadBalancerIngress{IP: "1.2.3.4"})
cluster1IngressWatch.Modify(createdIngress) cluster1IngressWatch.Modify(createdIngress)
// Wait for store to see the updated cluster ingress. // Wait for store to see the updated cluster ingress.
assert.NoError(t, WaitForStatusUpdate(t, ingressController.ingressFederatedInformer.GetTargetStore(), assert.NoError(t, WaitForStatusUpdate(t, ingressController.ingressFederatedInformer.GetTargetStore(),
@@ -210,28 +210,28 @@ func TestIngressController(t *testing.T) {
close(stop) close(stop)
} }
func GetIngressFromChan(t *testing.T, c chan runtime.Object) *extensions_v1beta1.Ingress { func GetIngressFromChan(t *testing.T, c chan runtime.Object) *extensionsv1beta1.Ingress {
obj := GetObjectFromChan(c) obj := GetObjectFromChan(c)
ingress, ok := obj.(*extensions_v1beta1.Ingress) ingress, ok := obj.(*extensionsv1beta1.Ingress)
if !ok { if !ok {
t.Logf("Object on channel was not of type *extensions_v1beta1.Ingress: %v", obj) t.Logf("Object on channel was not of type *extensionsv1beta1.Ingress: %v", obj)
} }
return ingress return ingress
} }
func GetConfigMapFromChan(c chan runtime.Object) *api_v1.ConfigMap { func GetConfigMapFromChan(c chan runtime.Object) *apiv1.ConfigMap {
configMap, _ := GetObjectFromChan(c).(*api_v1.ConfigMap) configMap, _ := GetObjectFromChan(c).(*apiv1.ConfigMap)
return configMap return configMap
} }
func GetClusterFromChan(c chan runtime.Object) *federation_api.Cluster { func GetClusterFromChan(c chan runtime.Object) *federationapi.Cluster {
cluster, _ := GetObjectFromChan(c).(*federation_api.Cluster) cluster, _ := GetObjectFromChan(c).(*federationapi.Cluster)
return cluster return cluster
} }
func NewConfigMap(uid string) *api_v1.ConfigMap { func NewConfigMap(uid string) *apiv1.ConfigMap {
return &api_v1.ConfigMap{ return &apiv1.ConfigMap{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: uidConfigMapName, Name: uidConfigMapName,
Namespace: uidConfigMapNamespace, Namespace: uidConfigMapNamespace,
SelfLink: "/api/v1/namespaces/" + uidConfigMapNamespace + "/configmap/" + uidConfigMapName, SelfLink: "/api/v1/namespaces/" + uidConfigMapNamespace + "/configmap/" + uidConfigMapName,
@@ -252,8 +252,8 @@ func WaitForFinalizersInFederationStore(ingressController *IngressController, st
if !found || err != nil { if !found || err != nil {
return false, err return false, err
} }
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
if ingressController.hasFinalizerFunc(ingress, api_v1.FinalizerOrphan) && if ingressController.hasFinalizerFunc(ingress, apiv1.FinalizerOrphan) &&
ingressController.hasFinalizerFunc(ingress, deletionhelper.FinalizerDeleteFromUnderlyingClusters) { ingressController.hasFinalizerFunc(ingress, deletionhelper.FinalizerDeleteFromUnderlyingClusters) {
return true, nil return true, nil
} }
@@ -280,14 +280,14 @@ func WaitForIngressInClusterStore(store util.FederatedReadOnlyStore, clusterName
} }
// Wait for ingress status to be updated to match the desiredStatus. // Wait for ingress status to be updated to match the desiredStatus.
func WaitForStatusUpdate(t *testing.T, store util.FederatedReadOnlyStore, clusterName, key string, desiredStatus api_v1.LoadBalancerStatus, timeout time.Duration) error { func WaitForStatusUpdate(t *testing.T, store util.FederatedReadOnlyStore, clusterName, key string, desiredStatus apiv1.LoadBalancerStatus, timeout time.Duration) error {
retryInterval := 100 * time.Millisecond retryInterval := 100 * time.Millisecond
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) { err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
obj, found, err := store.GetByKey(clusterName, key) obj, found, err := store.GetByKey(clusterName, key)
if !found || err != nil { if !found || err != nil {
return false, err return false, err
} }
ingress := obj.(*extensions_v1beta1.Ingress) ingress := obj.(*extensionsv1beta1.Ingress)
return reflect.DeepEqual(ingress.Status.LoadBalancer, desiredStatus), nil return reflect.DeepEqual(ingress.Status.LoadBalancer, desiredStatus), nil
}) })
return err return err

View File

@@ -20,14 +20,14 @@ import (
"fmt" "fmt"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink" "k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
@@ -84,7 +84,7 @@ type NamespaceController struct {
func NewNamespaceController(client federationclientset.Interface) *NamespaceController { func NewNamespaceController(client federationclientset.Interface) *NamespaceController {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client)) broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-namespace-controller"}) recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-namespace-controller"})
nc := &NamespaceController{ nc := &NamespaceController{
federatedApiClient: client, federatedApiClient: client,
@@ -103,31 +103,31 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
// Start informer in federated API servers on namespaces that should be federated. // Start informer in federated API servers on namespaces that should be federated.
nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer( nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
return client.Core().Namespaces().List(options) return client.Core().Namespaces().List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().Namespaces().Watch(options) return client.Core().Namespaces().Watch(options)
}, },
}, },
&api_v1.Namespace{}, &apiv1.Namespace{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj runtime.Object) { nc.deliverNamespaceObj(obj, 0, false) })) util.NewTriggerOnAllChanges(func(obj runtime.Object) { nc.deliverNamespaceObj(obj, 0, false) }))
// Federated informer on namespaces in members of federation. // Federated informer on namespaces in members of federation.
nc.namespaceFederatedInformer = util.NewFederatedInformer( nc.namespaceFederatedInformer = util.NewFederatedInformer(
client, client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) { func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
return targetClient.Core().Namespaces().List(options) return targetClient.Core().Namespaces().List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().Namespaces().Watch(options) return targetClient.Core().Namespaces().Watch(options)
}, },
}, },
&api_v1.Namespace{}, &apiv1.Namespace{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some namespace opration succeeded. // would be just confirmation that some namespace opration succeeded.
@@ -136,7 +136,7 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
)) ))
}, },
&util.ClusterLifecycleHandlerFuncs{ &util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) { ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the namespaces again. // When new cluster becomes available process all the namespaces again.
nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay) nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay)
}, },
@@ -146,18 +146,18 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
// Federated updeater along with Create/Update/Delete operations. // Federated updeater along with Create/Update/Delete operations.
nc.federatedUpdater = util.NewFederatedUpdater(nc.namespaceFederatedInformer, nc.federatedUpdater = util.NewFederatedUpdater(nc.namespaceFederatedInformer,
func(client kubeclientset.Interface, obj runtime.Object) error { func(client kubeclientset.Interface, obj runtime.Object) error {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
_, err := client.Core().Namespaces().Create(namespace) _, err := client.Core().Namespaces().Create(namespace)
return err return err
}, },
func(client kubeclientset.Interface, obj runtime.Object) error { func(client kubeclientset.Interface, obj runtime.Object) error {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
_, err := client.Core().Namespaces().Update(namespace) _, err := client.Core().Namespaces().Update(namespace)
return err return err
}, },
func(client kubeclientset.Interface, obj runtime.Object) error { func(client kubeclientset.Interface, obj runtime.Object) error {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
err := client.Core().Namespaces().Delete(namespace.Name, &api_v1.DeleteOptions{}) err := client.Core().Namespaces().Delete(namespace.Name, &apiv1.DeleteOptions{})
// IsNotFound error is fine since that means the object is deleted already. // IsNotFound error is fine since that means the object is deleted already.
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
return nil return nil
@@ -171,7 +171,7 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
nc.addFinalizerFunc, nc.addFinalizerFunc,
// objNameFunc // objNameFunc
func(obj runtime.Object) string { func(obj runtime.Object) string {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
return namespace.Name return namespace.Name
}, },
nc.updateTimeout, nc.updateTimeout,
@@ -184,7 +184,7 @@ func NewNamespaceController(client federationclientset.Interface) *NamespaceCont
// Returns true if the given object has the given finalizer in its ObjectMeta. // Returns true if the given object has the given finalizer in its ObjectMeta.
func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer string) bool { func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer string) bool {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
for i := range namespace.ObjectMeta.Finalizers { for i := range namespace.ObjectMeta.Finalizers {
if string(namespace.ObjectMeta.Finalizers[i]) == finalizer { if string(namespace.ObjectMeta.Finalizers[i]) == finalizer {
return true return true
@@ -196,7 +196,7 @@ func (nc *NamespaceController) hasFinalizerFunc(obj runtime.Object, finalizer st
// Removes the finalizer from the given objects ObjectMeta. // Removes the finalizer from the given objects ObjectMeta.
// Assumes that the given object is a namespace. // Assumes that the given object is a namespace.
func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) { func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
newFinalizers := []string{} newFinalizers := []string{}
hasFinalizer := false hasFinalizer := false
for i := range namespace.ObjectMeta.Finalizers { for i := range namespace.ObjectMeta.Finalizers {
@@ -221,7 +221,7 @@ func (nc *NamespaceController) removeFinalizerFunc(obj runtime.Object, finalizer
// Adds the given finalizer to the given objects ObjectMeta. // Adds the given finalizer to the given objects ObjectMeta.
// Assumes that the given object is a namespace. // Assumes that the given object is a namespace.
func (nc *NamespaceController) addFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) { func (nc *NamespaceController) addFinalizerFunc(obj runtime.Object, finalizer string) (runtime.Object, error) {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
namespace.ObjectMeta.Finalizers = append(namespace.ObjectMeta.Finalizers, finalizer) namespace.ObjectMeta.Finalizers = append(namespace.ObjectMeta.Finalizers, finalizer)
namespace, err := nc.federatedApiClient.Core().Namespaces().Finalize(namespace) namespace, err := nc.federatedApiClient.Core().Namespaces().Finalize(namespace)
if err != nil { if err != nil {
@@ -231,8 +231,8 @@ func (nc *NamespaceController) addFinalizerFunc(obj runtime.Object, finalizer st
} }
// Returns true if the given object has the given finalizer in its NamespaceSpec. // Returns true if the given object has the given finalizer in its NamespaceSpec.
func (nc *NamespaceController) hasFinalizerFuncInSpec(obj runtime.Object, finalizer api_v1.FinalizerName) bool { func (nc *NamespaceController) hasFinalizerFuncInSpec(obj runtime.Object, finalizer apiv1.FinalizerName) bool {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
for i := range namespace.Spec.Finalizers { for i := range namespace.Spec.Finalizers {
if namespace.Spec.Finalizers[i] == finalizer { if namespace.Spec.Finalizers[i] == finalizer {
return true return true
@@ -242,8 +242,8 @@ func (nc *NamespaceController) hasFinalizerFuncInSpec(obj runtime.Object, finali
} }
// Removes the finalizer from the given objects NamespaceSpec. // Removes the finalizer from the given objects NamespaceSpec.
func (nc *NamespaceController) removeFinalizerFromSpec(namespace *api_v1.Namespace, finalizer api_v1.FinalizerName) (*api_v1.Namespace, error) { func (nc *NamespaceController) removeFinalizerFromSpec(namespace *apiv1.Namespace, finalizer apiv1.FinalizerName) (*apiv1.Namespace, error) {
updatedFinalizers := []api_v1.FinalizerName{} updatedFinalizers := []apiv1.FinalizerName{}
for i := range namespace.Spec.Finalizers { for i := range namespace.Spec.Finalizers {
if namespace.Spec.Finalizers[i] != finalizer { if namespace.Spec.Finalizers[i] != finalizer {
updatedFinalizers = append(updatedFinalizers, namespace.Spec.Finalizers[i]) updatedFinalizers = append(updatedFinalizers, namespace.Spec.Finalizers[i])
@@ -275,7 +275,7 @@ func (nc *NamespaceController) Run(stopChan <-chan struct{}) {
} }
func (nc *NamespaceController) deliverNamespaceObj(obj interface{}, delay time.Duration, failed bool) { func (nc *NamespaceController) deliverNamespaceObj(obj interface{}, delay time.Duration, failed bool) {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
nc.deliverNamespace(namespace.Name, delay, failed) nc.deliverNamespace(namespace.Name, delay, failed)
} }
@@ -314,7 +314,7 @@ func (nc *NamespaceController) reconcileNamespacesOnClusterChange() {
nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay) nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay)
} }
for _, obj := range nc.namespaceInformerStore.List() { for _, obj := range nc.namespaceInformerStore.List() {
namespace := obj.(*api_v1.Namespace) namespace := obj.(*apiv1.Namespace)
nc.deliverNamespace(namespace.Name, nc.smallDelay, false) nc.deliverNamespace(namespace.Name, nc.smallDelay, false)
} }
} }
@@ -339,7 +339,7 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
// Create a copy before modifying the namespace to prevent race condition with // Create a copy before modifying the namespace to prevent race condition with
// other readers of namespace from store. // other readers of namespace from store.
namespaceObj, err := conversion.NewCloner().DeepCopy(namespaceObjFromStore) namespaceObj, err := conversion.NewCloner().DeepCopy(namespaceObjFromStore)
baseNamespace, ok := namespaceObj.(*api_v1.Namespace) baseNamespace, ok := namespaceObj.(*apiv1.Namespace)
if err != nil || !ok { if err != nil || !ok {
glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err) glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
nc.deliverNamespace(namespace, 0, true) nc.deliverNamespace(namespace, 0, true)
@@ -368,7 +368,7 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
nc.deliverNamespace(namespace, 0, false) nc.deliverNamespace(namespace, 0, false)
return return
} }
baseNamespace = updatedNamespaceObj.(*api_v1.Namespace) baseNamespace = updatedNamespaceObj.(*apiv1.Namespace)
glog.V(3).Infof("Syncing namespace %s in underlying clusters", baseNamespace.Name) glog.V(3).Infof("Syncing namespace %s in underlying clusters", baseNamespace.Name)
// Sync the namespace in all underlying clusters. // Sync the namespace in all underlying clusters.
@@ -388,9 +388,9 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
return return
} }
// The object should not be modified. // The object should not be modified.
desiredNamespace := &api_v1.Namespace{ desiredNamespace := &apiv1.Namespace{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseNamespace.ObjectMeta), ObjectMeta: util.DeepCopyRelevantObjectMeta(baseNamespace.ObjectMeta),
Spec: util.DeepCopyApiTypeOrPanic(baseNamespace.Spec).(api_v1.NamespaceSpec), Spec: util.DeepCopyApiTypeOrPanic(baseNamespace.Spec).(apiv1.NamespaceSpec),
} }
glog.V(5).Infof("Desired namespace in underlying clusters: %+v", desiredNamespace) glog.V(5).Infof("Desired namespace in underlying clusters: %+v", desiredNamespace)
@@ -404,7 +404,7 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
ClusterName: cluster.Name, ClusterName: cluster.Name,
}) })
} else { } else {
clusterNamespace := clusterNamespaceObj.(*api_v1.Namespace) clusterNamespace := clusterNamespaceObj.(*apiv1.Namespace)
// Update existing namespace, if needed. // Update existing namespace, if needed.
if !util.ObjectMetaAndSpecEquivalent(desiredNamespace, clusterNamespace) { if !util.ObjectMetaAndSpecEquivalent(desiredNamespace, clusterNamespace) {
@@ -441,17 +441,17 @@ func (nc *NamespaceController) reconcileNamespace(namespace string) {
} }
// delete deletes the given namespace or returns error if the deletion was not complete. // delete deletes the given namespace or returns error if the deletion was not complete.
func (nc *NamespaceController) delete(namespace *api_v1.Namespace) error { func (nc *NamespaceController) delete(namespace *apiv1.Namespace) error {
// Set Terminating status. // Set Terminating status.
updatedNamespace := &api_v1.Namespace{ updatedNamespace := &apiv1.Namespace{
ObjectMeta: namespace.ObjectMeta, ObjectMeta: namespace.ObjectMeta,
Spec: namespace.Spec, Spec: namespace.Spec,
Status: api_v1.NamespaceStatus{ Status: apiv1.NamespaceStatus{
Phase: api_v1.NamespaceTerminating, Phase: apiv1.NamespaceTerminating,
}, },
} }
var err error var err error
if namespace.Status.Phase != api_v1.NamespaceTerminating { if namespace.Status.Phase != apiv1.NamespaceTerminating {
glog.V(2).Infof("Marking ns %s as terminating", namespace.Name) glog.V(2).Infof("Marking ns %s as terminating", namespace.Name)
nc.eventRecorder.Event(namespace, api.EventTypeNormal, "DeleteNamespace", fmt.Sprintf("Marking for deletion")) nc.eventRecorder.Event(namespace, api.EventTypeNormal, "DeleteNamespace", fmt.Sprintf("Marking for deletion"))
_, err = nc.federatedApiClient.Core().Namespaces().Update(updatedNamespace) _, err = nc.federatedApiClient.Core().Namespaces().Update(updatedNamespace)
@@ -460,7 +460,7 @@ func (nc *NamespaceController) delete(namespace *api_v1.Namespace) error {
} }
} }
if nc.hasFinalizerFuncInSpec(updatedNamespace, api_v1.FinalizerKubernetes) { if nc.hasFinalizerFuncInSpec(updatedNamespace, apiv1.FinalizerKubernetes) {
// Delete resources in this namespace. // Delete resources in this namespace.
updatedNamespace, err = nc.removeKubernetesFinalizer(updatedNamespace) updatedNamespace, err = nc.removeKubernetesFinalizer(updatedNamespace)
if err != nil { if err != nil {
@@ -488,42 +488,42 @@ func (nc *NamespaceController) delete(namespace *api_v1.Namespace) error {
} }
// Ensures that all resources in this namespace are deleted and then removes the kubernetes finalizer. // Ensures that all resources in this namespace are deleted and then removes the kubernetes finalizer.
func (nc *NamespaceController) removeKubernetesFinalizer(namespace *api_v1.Namespace) (*api_v1.Namespace, error) { func (nc *NamespaceController) removeKubernetesFinalizer(namespace *apiv1.Namespace) (*apiv1.Namespace, error) {
// Right now there are just 7 types of objects: Deployments, DaemonSets, ReplicaSet, Secret, Ingress, Events and Service. // Right now there are just 7 types of objects: Deployments, DaemonSets, ReplicaSet, Secret, Ingress, Events and Service.
// Temporarly these items are simply deleted one by one to squeeze this code into 1.4. // Temporarly these items are simply deleted one by one to squeeze this code into 1.4.
// TODO: Make it generic (like in the regular namespace controller) and parallel. // TODO: Make it generic (like in the regular namespace controller) and parallel.
err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{}) err := nc.federatedApiClient.Core().Services(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete service list: %v", err) return nil, fmt.Errorf("failed to delete service list: %v", err)
} }
err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{}) err = nc.federatedApiClient.Extensions().ReplicaSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete replicaset list from namespace: %v", err) return nil, fmt.Errorf("failed to delete replicaset list from namespace: %v", err)
} }
err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{}) err = nc.federatedApiClient.Core().Secrets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete secret list from namespace: %v", err) return nil, fmt.Errorf("failed to delete secret list from namespace: %v", err)
} }
err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{}) err = nc.federatedApiClient.Extensions().Ingresses(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete ingresses list from namespace: %v", err) return nil, fmt.Errorf("failed to delete ingresses list from namespace: %v", err)
} }
err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{}) err = nc.federatedApiClient.Extensions().DaemonSets(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete daemonsets list from namespace: %v", err) return nil, fmt.Errorf("failed to delete daemonsets list from namespace: %v", err)
} }
err = nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{}) err = nc.federatedApiClient.Extensions().Deployments(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete deployments list from namespace: %v", err) return nil, fmt.Errorf("failed to delete deployments list from namespace: %v", err)
} }
err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&api_v1.DeleteOptions{}, api_v1.ListOptions{}) err = nc.federatedApiClient.Core().Events(namespace.Name).DeleteCollection(&apiv1.DeleteOptions{}, apiv1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to delete events list from namespace: %v", err) return nil, fmt.Errorf("failed to delete events list from namespace: %v", err)
} }
// Remove kube_api.FinalizerKubernetes // Remove kube_api.FinalizerKubernetes
if len(namespace.Spec.Finalizers) != 0 { if len(namespace.Spec.Finalizers) != 0 {
return nc.removeFinalizerFromSpec(namespace, api_v1.FinalizerKubernetes) return nc.removeFinalizerFromSpec(namespace, apiv1.FinalizerKubernetes)
} }
return namespace, nil return namespace, nil
} }

View File

@@ -21,16 +21,16 @@ import (
"testing" "testing"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extensionsv1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -39,51 +39,51 @@ import (
) )
func TestNamespaceController(t *testing.T) { func TestNamespaceController(t *testing.T) {
cluster1 := NewCluster("cluster1", api_v1.ConditionTrue) cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", api_v1.ConditionTrue) cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
ns1 := api_v1.Namespace{ ns1 := apiv1.Namespace{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-namespace", Name: "test-namespace",
SelfLink: "/api/v1/namespaces/test-namespace", SelfLink: "/api/v1/namespaces/test-namespace",
}, },
Spec: api_v1.NamespaceSpec{ Spec: apiv1.NamespaceSpec{
Finalizers: []api_v1.FinalizerName{api_v1.FinalizerKubernetes}, Finalizers: []apiv1.FinalizerName{apiv1.FinalizerKubernetes},
}, },
} }
fakeClient := &fake_fedclientset.Clientset{} fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}}) RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("namespaces", &fakeClient.Fake, &api_v1.NamespaceList{Items: []api_v1.Namespace{}}) RegisterFakeList("namespaces", &fakeClient.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
namespaceWatch := RegisterFakeWatch("namespaces", &fakeClient.Fake) namespaceWatch := RegisterFakeWatch("namespaces", &fakeClient.Fake)
namespaceCreateChan := RegisterFakeCopyOnCreate("namespaces", &fakeClient.Fake, namespaceWatch) namespaceCreateChan := RegisterFakeCopyOnCreate("namespaces", &fakeClient.Fake, namespaceWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake) clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fake_kubeclientset.Clientset{} cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("namespaces", &cluster1Client.Fake) cluster1Watch := RegisterFakeWatch("namespaces", &cluster1Client.Fake)
RegisterFakeList("namespaces", &cluster1Client.Fake, &api_v1.NamespaceList{Items: []api_v1.Namespace{}}) RegisterFakeList("namespaces", &cluster1Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster1Client.Fake, cluster1Watch) cluster1CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("namespaces", &cluster1Client.Fake, cluster1Watch) // cluster1UpdateChan := RegisterFakeCopyOnUpdate("namespaces", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fake_kubeclientset.Clientset{} cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("namespaces", &cluster2Client.Fake) cluster2Watch := RegisterFakeWatch("namespaces", &cluster2Client.Fake)
RegisterFakeList("namespaces", &cluster2Client.Fake, &api_v1.NamespaceList{Items: []api_v1.Namespace{}}) RegisterFakeList("namespaces", &cluster2Client.Fake, &apiv1.NamespaceList{Items: []apiv1.Namespace{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster2Client.Fake, cluster2Watch) cluster2CreateChan := RegisterFakeCopyOnCreate("namespaces", &cluster2Client.Fake, cluster2Watch)
RegisterFakeList("replicasets", &fakeClient.Fake, &extensionsv1.ReplicaSetList{Items: []extensionsv1.ReplicaSet{ RegisterFakeList("replicasets", &fakeClient.Fake, &extensionsv1.ReplicaSetList{Items: []extensionsv1.ReplicaSet{
{ {
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-rs", Name: "test-rs",
Namespace: ns1.Namespace, Namespace: ns1.Namespace,
}}}}) }}}})
RegisterFakeList("secrets", &fakeClient.Fake, &api_v1.SecretList{Items: []api_v1.Secret{ RegisterFakeList("secrets", &fakeClient.Fake, &apiv1.SecretList{Items: []apiv1.Secret{
{ {
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-secret", Name: "test-secret",
Namespace: ns1.Namespace, Namespace: ns1.Namespace,
}}}}) }}}})
RegisterFakeList("services", &fakeClient.Fake, &api_v1.ServiceList{Items: []api_v1.Service{ RegisterFakeList("services", &fakeClient.Fake, &apiv1.ServiceList{Items: []apiv1.Service{
{ {
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-service", Name: "test-service",
Namespace: ns1.Namespace, Namespace: ns1.Namespace,
}}}}) }}}})
@@ -93,7 +93,7 @@ func TestNamespaceController(t *testing.T) {
secretDeleteChan := RegisterDeleteCollection(&fakeClient.Fake, "secrets") secretDeleteChan := RegisterDeleteCollection(&fakeClient.Fake, "secrets")
namespaceController := NewNamespaceController(fakeClient) namespaceController := NewNamespaceController(fakeClient)
informerClientFactory := func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) { informerClientFactory := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name { switch cluster.Name {
case cluster1.Name: case cluster1.Name:
return cluster1Client, nil return cluster1Client, nil
@@ -155,7 +155,7 @@ func TestNamespaceController(t *testing.T) {
// Delete the namespace with orphan finalizer (let namespaces // Delete the namespace with orphan finalizer (let namespaces
// in underlying clusters be as is). // in underlying clusters be as is).
// TODO: Add a test without orphan finalizer. // TODO: Add a test without orphan finalizer.
ns1.ObjectMeta.Finalizers = append(ns1.ObjectMeta.Finalizers, api_v1.FinalizerOrphan) ns1.ObjectMeta.Finalizers = append(ns1.ObjectMeta.Finalizers, apiv1.FinalizerOrphan)
ns1.DeletionTimestamp = &unversioned.Time{Time: time.Now()} ns1.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
namespaceWatch.Modify(&ns1) namespaceWatch.Modify(&ns1)
assert.Equal(t, ns1.Name, GetStringFromChan(nsDeleteChan)) assert.Equal(t, ns1.Name, GetStringFromChan(nsDeleteChan))
@@ -166,7 +166,7 @@ func TestNamespaceController(t *testing.T) {
close(stop) close(stop)
} }
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error)) { func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
testInformer := ToFederatedInformerForTestOnly(informer) testInformer := ToFederatedInformerForTestOnly(informer)
testInformer.SetClientFactory(informerClientFactory) testInformer.SetClientFactory(informerClientFactory)
} }
@@ -199,8 +199,8 @@ func GetStringFromChan(c chan string) string {
} }
} }
func GetNamespaceFromChan(c chan runtime.Object) *api_v1.Namespace { func GetNamespaceFromChan(c chan runtime.Object) *apiv1.Namespace {
namespace := GetObjectFromChan(c).(*api_v1.Namespace) namespace := GetObjectFromChan(c).(*apiv1.Namespace)
return namespace return namespace
} }

View File

@@ -20,20 +20,20 @@ import (
"fmt" "fmt"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink" "k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -85,7 +85,7 @@ type SecretController struct {
func NewSecretController(client federationclientset.Interface) *SecretController { func NewSecretController(client federationclientset.Interface) *SecretController {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client)) broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-secrets-controller"}) recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-secrets-controller"})
secretcontroller := &SecretController{ secretcontroller := &SecretController{
federatedApiClient: client, federatedApiClient: client,
@@ -104,43 +104,43 @@ func NewSecretController(client federationclientset.Interface) *SecretController
// Start informer in federated API servers on secrets that should be federated. // Start informer in federated API servers on secrets that should be federated.
secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer( secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Core().Secrets(api_v1.NamespaceAll).List(options) return client.Core().Secrets(apiv1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().Secrets(api_v1.NamespaceAll).Watch(options) return client.Core().Secrets(apiv1.NamespaceAll).Watch(options)
}, },
}, },
&api_v1.Secret{}, &apiv1.Secret{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) })) util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))
// Federated informer on secrets in members of federation. // Federated informer on secrets in members of federation.
secretcontroller.secretFederatedInformer = util.NewFederatedInformer( secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
client, client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) { func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Core().Secrets(api_v1.NamespaceAll).List(options) return targetClient.Core().Secrets(apiv1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().Secrets(api_v1.NamespaceAll).Watch(options) return targetClient.Core().Secrets(apiv1.NamespaceAll).Watch(options)
}, },
}, },
&api_v1.Secret{}, &apiv1.Secret{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in federated cluster is changed. In most cases it // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
// would be just confirmation that some secret opration succeeded. // would be just confirmation that some secret opration succeeded.
util.NewTriggerOnAllChanges( util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false) secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false)
}, },
)) ))
}, },
&util.ClusterLifecycleHandlerFuncs{ &util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) { ClusterAvailable: func(cluster *federationapi.Cluster) {
// When new cluster becomes available process all the secrets again. // When new cluster becomes available process all the secrets again.
secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay)) secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
}, },
@@ -149,19 +149,19 @@ func NewSecretController(client federationclientset.Interface) *SecretController
// Federated updeater along with Create/Update/Delete operations. // Federated updeater along with Create/Update/Delete operations.
secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer, secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Create(secret) _, err := client.Core().Secrets(secret.Namespace).Create(secret)
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Update(secret) _, err := client.Core().Secrets(secret.Namespace).Update(secret)
return err return err
}, },
func(client kubeclientset.Interface, obj pkg_runtime.Object) error { func(client kubeclientset.Interface, obj pkgruntime.Object) error {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &api_v1.DeleteOptions{}) err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &apiv1.DeleteOptions{})
return err return err
}) })
@@ -170,8 +170,8 @@ func NewSecretController(client federationclientset.Interface) *SecretController
secretcontroller.removeFinalizerFunc, secretcontroller.removeFinalizerFunc,
secretcontroller.addFinalizerFunc, secretcontroller.addFinalizerFunc,
// objNameFunc // objNameFunc
func(obj pkg_runtime.Object) string { func(obj pkgruntime.Object) string {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
return secret.Name return secret.Name
}, },
secretcontroller.updateTimeout, secretcontroller.updateTimeout,
@@ -184,8 +184,8 @@ func NewSecretController(client federationclientset.Interface) *SecretController
} }
// Returns true if the given object has the given finalizer in its ObjectMeta. // Returns true if the given object has the given finalizer in its ObjectMeta.
func (secretcontroller *SecretController) hasFinalizerFunc(obj pkg_runtime.Object, finalizer string) bool { func (secretcontroller *SecretController) hasFinalizerFunc(obj pkgruntime.Object, finalizer string) bool {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
for i := range secret.ObjectMeta.Finalizers { for i := range secret.ObjectMeta.Finalizers {
if string(secret.ObjectMeta.Finalizers[i]) == finalizer { if string(secret.ObjectMeta.Finalizers[i]) == finalizer {
return true return true
@@ -196,8 +196,8 @@ func (secretcontroller *SecretController) hasFinalizerFunc(obj pkg_runtime.Objec
// Removes the finalizer from the given objects ObjectMeta. // Removes the finalizer from the given objects ObjectMeta.
// Assumes that the given object is a secret. // Assumes that the given object is a secret.
func (secretcontroller *SecretController) removeFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) { func (secretcontroller *SecretController) removeFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
newFinalizers := []string{} newFinalizers := []string{}
hasFinalizer := false hasFinalizer := false
for i := range secret.ObjectMeta.Finalizers { for i := range secret.ObjectMeta.Finalizers {
@@ -221,8 +221,8 @@ func (secretcontroller *SecretController) removeFinalizerFunc(obj pkg_runtime.Ob
// Adds the given finalizer to the given objects ObjectMeta. // Adds the given finalizer to the given objects ObjectMeta.
// Assumes that the given object is a secret. // Assumes that the given object is a secret.
func (secretcontroller *SecretController) addFinalizerFunc(obj pkg_runtime.Object, finalizer string) (pkg_runtime.Object, error) { func (secretcontroller *SecretController) addFinalizerFunc(obj pkgruntime.Object, finalizer string) (pkgruntime.Object, error) {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
secret.ObjectMeta.Finalizers = append(secret.ObjectMeta.Finalizers, finalizer) secret.ObjectMeta.Finalizers = append(secret.ObjectMeta.Finalizers, finalizer)
secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret) secret, err := secretcontroller.federatedApiClient.Core().Secrets(secret.Namespace).Update(secret)
if err != nil { if err != nil {
@@ -249,7 +249,7 @@ func (secretcontroller *SecretController) Run(stopChan <-chan struct{}) {
} }
func (secretcontroller *SecretController) deliverSecretObj(obj interface{}, delay time.Duration, failed bool) { func (secretcontroller *SecretController) deliverSecretObj(obj interface{}, delay time.Duration, failed bool) {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, delay, failed) secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, delay, failed)
} }
@@ -289,7 +289,7 @@ func (secretcontroller *SecretController) reconcileSecretsOnClusterChange() {
secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay)) secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
} }
for _, obj := range secretcontroller.secretInformerStore.List() { for _, obj := range secretcontroller.secretInformerStore.List() {
secret := obj.(*api_v1.Secret) secret := obj.(*apiv1.Secret)
secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, secretcontroller.smallDelay, false) secretcontroller.deliverSecret(types.NamespacedName{Namespace: secret.Namespace, Name: secret.Name}, secretcontroller.smallDelay, false)
} }
} }
@@ -316,7 +316,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
// Create a copy before modifying the obj to prevent race condition with // Create a copy before modifying the obj to prevent race condition with
// other readers of obj from store. // other readers of obj from store.
baseSecretObj, err := conversion.NewCloner().DeepCopy(baseSecretObjFromStore) baseSecretObj, err := conversion.NewCloner().DeepCopy(baseSecretObjFromStore)
baseSecret, ok := baseSecretObj.(*api_v1.Secret) baseSecret, ok := baseSecretObj.(*apiv1.Secret)
if err != nil || !ok { if err != nil || !ok {
glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err) glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
secretcontroller.deliverSecret(secret, 0, true) secretcontroller.deliverSecret(secret, 0, true)
@@ -342,7 +342,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
secretcontroller.deliverSecret(secret, 0, false) secretcontroller.deliverSecret(secret, 0, false)
return return
} }
baseSecret = updatedSecretObj.(*api_v1.Secret) baseSecret = updatedSecretObj.(*apiv1.Secret)
glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name) glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name)
@@ -363,7 +363,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
} }
// The data should not be modified. // The data should not be modified.
desiredSecret := &api_v1.Secret{ desiredSecret := &apiv1.Secret{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta), ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta),
Data: baseSecret.Data, Data: baseSecret.Data,
Type: baseSecret.Type, Type: baseSecret.Type,
@@ -379,7 +379,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
ClusterName: cluster.Name, ClusterName: cluster.Name,
}) })
} else { } else {
clusterSecret := clusterSecretObj.(*api_v1.Secret) clusterSecret := clusterSecretObj.(*apiv1.Secret)
// Update existing secret, if needed. // Update existing secret, if needed.
if !util.SecretEquivalent(*desiredSecret, *clusterSecret) { if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {
@@ -416,7 +416,7 @@ func (secretcontroller *SecretController) reconcileSecret(secret types.Namespace
} }
// delete deletes the given secret or returns error if the deletion was not complete. // delete deletes the given secret or returns error if the deletion was not complete.
func (secretcontroller *SecretController) delete(secret *api_v1.Secret) error { func (secretcontroller *SecretController) delete(secret *apiv1.Secret) error {
glog.V(3).Infof("Handling deletion of secret: %v", *secret) glog.V(3).Infof("Handling deletion of secret: %v", *secret)
_, err := secretcontroller.deletionHelper.HandleObjectInUnderlyingClusters(secret) _, err := secretcontroller.deletionHelper.HandleObjectInUnderlyingClusters(secret)
if err != nil { if err != nil {

View File

@@ -22,14 +22,14 @@ import (
"testing" "testing"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper" "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -38,29 +38,29 @@ import (
) )
func TestSecretController(t *testing.T) { func TestSecretController(t *testing.T) {
cluster1 := NewCluster("cluster1", api_v1.ConditionTrue) cluster1 := NewCluster("cluster1", apiv1.ConditionTrue)
cluster2 := NewCluster("cluster2", api_v1.ConditionTrue) cluster2 := NewCluster("cluster2", apiv1.ConditionTrue)
fakeClient := &fake_fedclientset.Clientset{} fakeClient := &fakefedclientset.Clientset{}
RegisterFakeList("clusters", &fakeClient.Fake, &federation_api.ClusterList{Items: []federation_api.Cluster{*cluster1}}) RegisterFakeList("clusters", &fakeClient.Fake, &federationapi.ClusterList{Items: []federationapi.Cluster{*cluster1}})
RegisterFakeList("secrets", &fakeClient.Fake, &api_v1.SecretList{Items: []api_v1.Secret{}}) RegisterFakeList("secrets", &fakeClient.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
secretWatch := RegisterFakeWatch("secrets", &fakeClient.Fake) secretWatch := RegisterFakeWatch("secrets", &fakeClient.Fake)
secretUpdateChan := RegisterFakeCopyOnUpdate("secrets", &fakeClient.Fake, secretWatch) secretUpdateChan := RegisterFakeCopyOnUpdate("secrets", &fakeClient.Fake, secretWatch)
clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake) clusterWatch := RegisterFakeWatch("clusters", &fakeClient.Fake)
cluster1Client := &fake_kubeclientset.Clientset{} cluster1Client := &fakekubeclientset.Clientset{}
cluster1Watch := RegisterFakeWatch("secrets", &cluster1Client.Fake) cluster1Watch := RegisterFakeWatch("secrets", &cluster1Client.Fake)
RegisterFakeList("secrets", &cluster1Client.Fake, &api_v1.SecretList{Items: []api_v1.Secret{}}) RegisterFakeList("secrets", &cluster1Client.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
cluster1CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster1Client.Fake, cluster1Watch) cluster1CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster1Client.Fake, cluster1Watch)
// cluster1UpdateChan := RegisterFakeCopyOnUpdate("secrets", &cluster1Client.Fake, cluster1Watch) // cluster1UpdateChan := RegisterFakeCopyOnUpdate("secrets", &cluster1Client.Fake, cluster1Watch)
cluster2Client := &fake_kubeclientset.Clientset{} cluster2Client := &fakekubeclientset.Clientset{}
cluster2Watch := RegisterFakeWatch("secrets", &cluster2Client.Fake) cluster2Watch := RegisterFakeWatch("secrets", &cluster2Client.Fake)
RegisterFakeList("secrets", &cluster2Client.Fake, &api_v1.SecretList{Items: []api_v1.Secret{}}) RegisterFakeList("secrets", &cluster2Client.Fake, &apiv1.SecretList{Items: []apiv1.Secret{}})
cluster2CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster2Client.Fake, cluster2Watch) cluster2CreateChan := RegisterFakeCopyOnCreate("secrets", &cluster2Client.Fake, cluster2Watch)
secretController := NewSecretController(fakeClient) secretController := NewSecretController(fakeClient)
informerClientFactory := func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) { informerClientFactory := func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
switch cluster.Name { switch cluster.Name {
case cluster1.Name: case cluster1.Name:
return cluster1Client, nil return cluster1Client, nil
@@ -80,8 +80,8 @@ func TestSecretController(t *testing.T) {
stop := make(chan struct{}) stop := make(chan struct{})
secretController.Run(stop) secretController.Run(stop)
secret1 := api_v1.Secret{ secret1 := apiv1.Secret{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "test-secret", Name: "test-secret",
Namespace: "ns", Namespace: "ns",
SelfLink: "/api/v1/namespaces/ns/secrets/test-secret", SelfLink: "/api/v1/namespaces/ns/secrets/test-secret",
@@ -90,7 +90,7 @@ func TestSecretController(t *testing.T) {
"A": []byte("ala ma kota"), "A": []byte("ala ma kota"),
"B": []byte("quick brown fox"), "B": []byte("quick brown fox"),
}, },
Type: api_v1.SecretTypeOpaque, Type: apiv1.SecretTypeOpaque,
} }
// Test add federated secret. // Test add federated secret.
@@ -99,7 +99,7 @@ func TestSecretController(t *testing.T) {
updatedSecret := GetSecretFromChan(secretUpdateChan) updatedSecret := GetSecretFromChan(secretUpdateChan)
assert.True(t, secretController.hasFinalizerFunc(updatedSecret, deletionhelper.FinalizerDeleteFromUnderlyingClusters)) assert.True(t, secretController.hasFinalizerFunc(updatedSecret, deletionhelper.FinalizerDeleteFromUnderlyingClusters))
updatedSecret = GetSecretFromChan(secretUpdateChan) updatedSecret = GetSecretFromChan(secretUpdateChan)
assert.True(t, secretController.hasFinalizerFunc(updatedSecret, api_v1.FinalizerOrphan)) assert.True(t, secretController.hasFinalizerFunc(updatedSecret, apiv1.FinalizerOrphan))
secret1 = *updatedSecret secret1 = *updatedSecret
// Verify that the secret is created in underlying cluster1. // Verify that the secret is created in underlying cluster1.
@@ -161,12 +161,12 @@ func TestSecretController(t *testing.T) {
close(stop) close(stop)
} }
func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error)) { func setClientFactory(informer util.FederatedInformer, informerClientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
testInformer := ToFederatedInformerForTestOnly(informer) testInformer := ToFederatedInformerForTestOnly(informer)
testInformer.SetClientFactory(informerClientFactory) testInformer.SetClientFactory(informerClientFactory)
} }
func secretsEqual(a, b api_v1.Secret) bool { func secretsEqual(a, b apiv1.Secret) bool {
// Clear the SelfLink and ObjectMeta.Finalizers since they will be different // Clear the SelfLink and ObjectMeta.Finalizers since they will be different
// in resoure in federation control plane and resource in underlying cluster. // in resoure in federation control plane and resource in underlying cluster.
a.SelfLink = "" a.SelfLink = ""
@@ -176,20 +176,20 @@ func secretsEqual(a, b api_v1.Secret) bool {
return reflect.DeepEqual(a, b) return reflect.DeepEqual(a, b)
} }
func GetSecretFromChan(c chan runtime.Object) *api_v1.Secret { func GetSecretFromChan(c chan runtime.Object) *apiv1.Secret {
secret := GetObjectFromChan(c).(*api_v1.Secret) secret := GetObjectFromChan(c).(*apiv1.Secret)
return secret return secret
} }
// Wait till the store is updated with latest secret. // Wait till the store is updated with latest secret.
func WaitForSecretStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, desiredSecret *api_v1.Secret, timeout time.Duration) error { func WaitForSecretStoreUpdate(store util.FederatedReadOnlyStore, clusterName, key string, desiredSecret *apiv1.Secret, timeout time.Duration) error {
retryInterval := 100 * time.Millisecond retryInterval := 100 * time.Millisecond
err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) { err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
obj, found, err := store.GetByKey(clusterName, key) obj, found, err := store.GetByKey(clusterName, key)
if !found || err != nil { if !found || err != nil {
return false, err return false, err
} }
equal := secretsEqual(*obj.(*api_v1.Secret), *desiredSecret) equal := secretsEqual(*obj.(*apiv1.Secret), *desiredSecret)
return equal, err return equal, err
}) })
return err return err

View File

@@ -24,7 +24,7 @@ import (
cache "k8s.io/kubernetes/pkg/client/cache" cache "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/workqueue" "k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -91,7 +91,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
} }
cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer( cachedClusterClient.endpointStore.Store, cachedClusterClient.endpointController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Endpoints(v1.NamespaceAll).List(options) return clientset.Core().Endpoints(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
@@ -115,7 +115,7 @@ func (cc *clusterClientCache) startClusterLW(cluster *v1beta1.Cluster, clusterNa
cachedClusterClient.serviceStore.Indexer, cachedClusterClient.serviceController = cache.NewIndexerInformer( cachedClusterClient.serviceStore.Indexer, cachedClusterClient.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return clientset.Core().Services(v1.NamespaceAll).List(options) return clientset.Core().Services(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {

View File

@@ -34,7 +34,7 @@ import (
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -164,7 +164,7 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface,
} }
s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer( s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return s.federationClient.Core().Services(v1.NamespaceAll).List(options) return s.federationClient.Core().Services(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
@@ -187,7 +187,7 @@ func New(federationClient fedclientset.Interface, dns dnsprovider.Interface,
) )
s.clusterStore.Store, s.clusterController = cache.NewInformer( s.clusterStore.Store, s.clusterController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return s.federationClient.Federation().Clusters().List(options) return s.federationClient.Federation().Clusters().List(options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {

View File

@@ -19,9 +19,9 @@ package eventsink
import ( import (
"testing" "testing"
fake_fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
. "k8s.io/kubernetes/federation/pkg/federation-controller/util/test" . "k8s.io/kubernetes/federation/pkg/federation-controller/util/test"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -29,7 +29,7 @@ import (
) )
func TestEventSink(t *testing.T) { func TestEventSink(t *testing.T) {
fakeFederationClient := &fake_fedclientset.Clientset{} fakeFederationClient := &fakefedclientset.Clientset{}
createdChan := make(chan runtime.Object, 100) createdChan := make(chan runtime.Object, 100)
fakeFederationClient.AddReactor("create", "events", func(action core.Action) (bool, runtime.Object, error) { fakeFederationClient.AddReactor("create", "events", func(action core.Action) (bool, runtime.Object, error) {
createAction := action.(core.CreateAction) createAction := action.(core.CreateAction)
@@ -45,8 +45,8 @@ func TestEventSink(t *testing.T) {
return true, obj, nil return true, obj, nil
}) })
event := api_v1.Event{ event := apiv1.Event{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "bzium", Name: "bzium",
Namespace: "ns", Namespace: "ns",
}, },
@@ -54,7 +54,7 @@ func TestEventSink(t *testing.T) {
sink := NewFederatedEventSink(fakeFederationClient) sink := NewFederatedEventSink(fakeFederationClient)
eventUpdated, err := sink.Create(&event) eventUpdated, err := sink.Create(&event)
assert.NoError(t, err) assert.NoError(t, err)
eventV1 := GetObjectFromChan(createdChan).(*api_v1.Event) eventV1 := GetObjectFromChan(createdChan).(*apiv1.Event)
assert.NotNil(t, eventV1) assert.NotNil(t, eventV1)
// Just some simple sanity checks. // Just some simple sanity checks.
assert.Equal(t, event.Name, eventV1.Name) assert.Equal(t, event.Name, eventV1.Name)
@@ -62,7 +62,7 @@ func TestEventSink(t *testing.T) {
eventUpdated, err = sink.Update(&event) eventUpdated, err = sink.Update(&event)
assert.NoError(t, err) assert.NoError(t, err)
eventV1 = GetObjectFromChan(updateChan).(*api_v1.Event) eventV1 = GetObjectFromChan(updateChan).(*apiv1.Event)
assert.NotNil(t, eventV1) assert.NotNil(t, eventV1)
// Just some simple sanity checks. // Just some simple sanity checks.
assert.Equal(t, event.Name, eventV1.Name) assert.Equal(t, event.Name, eventV1.Name)

View File

@@ -22,13 +22,13 @@ import (
"sync" "sync"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog" "github.com/golang/glog"
@@ -68,7 +68,7 @@ type FederatedReadOnlyStore interface {
// issues occur less often. All users of the interface should assume // issues occur less often. All users of the interface should assume
// that there may be significant delays in content updates of all kinds and write their // that there may be significant delays in content updates of all kinds and write their
// code that it doesn't break if something is slightly out-of-sync. // code that it doesn't break if something is slightly out-of-sync.
ClustersSynced(clusters []*federation_api.Cluster) bool ClustersSynced(clusters []*federationapi.Cluster) bool
} }
// An interface to access federation members and clients. // An interface to access federation members and clients.
@@ -77,13 +77,13 @@ type FederationView interface {
GetClientsetForCluster(clusterName string) (kubeclientset.Interface, error) GetClientsetForCluster(clusterName string) (kubeclientset.Interface, error)
// GetUnreadyClusters returns a list of all clusters that are not ready yet. // GetUnreadyClusters returns a list of all clusters that are not ready yet.
GetUnreadyClusters() ([]*federation_api.Cluster, error) GetUnreadyClusters() ([]*federationapi.Cluster, error)
// GetReadyClusers returns all clusters for which the sub-informers are run. // GetReadyClusers returns all clusters for which the sub-informers are run.
GetReadyClusters() ([]*federation_api.Cluster, error) GetReadyClusters() ([]*federationapi.Cluster, error)
// GetReadyCluster returns the cluster with the given name, if found. // GetReadyCluster returns the cluster with the given name, if found.
GetReadyCluster(name string) (*federation_api.Cluster, bool, error) GetReadyCluster(name string) (*federationapi.Cluster, bool, error)
// ClustersSynced returns true if the view is synced (for the first time). // ClustersSynced returns true if the view is synced (for the first time).
ClustersSynced() bool ClustersSynced() bool
@@ -111,12 +111,12 @@ type FederatedInformer interface {
type FederatedInformerForTestOnly interface { type FederatedInformerForTestOnly interface {
FederatedInformer FederatedInformer
SetClientFactory(func(*federation_api.Cluster) (kubeclientset.Interface, error)) SetClientFactory(func(*federationapi.Cluster) (kubeclientset.Interface, error))
} }
// A function that should be used to create an informer on the target object. Store should use // A function that should be used to create an informer on the target object. Store should use
// cache.DeletionHandlingMetaNamespaceKeyFunc as a keying function. // cache.DeletionHandlingMetaNamespaceKeyFunc as a keying function.
type TargetInformerFactory func(*federation_api.Cluster, kubeclientset.Interface) (cache.Store, cache.ControllerInterface) type TargetInformerFactory func(*federationapi.Cluster, kubeclientset.Interface) (cache.Store, cache.ControllerInterface)
// A structure with cluster lifecycle handler functions. Cluster is available (and ClusterAvailable is fired) // A structure with cluster lifecycle handler functions. Cluster is available (and ClusterAvailable is fired)
// when it is created in federated etcd and ready. Cluster becomes unavailable (and ClusterUnavailable is fired) // when it is created in federated etcd and ready. Cluster becomes unavailable (and ClusterUnavailable is fired)
@@ -124,10 +124,10 @@ type TargetInformerFactory func(*federation_api.Cluster, kubeclientset.Interface
// and ClusterUnavailable are fired. // and ClusterUnavailable are fired.
type ClusterLifecycleHandlerFuncs struct { type ClusterLifecycleHandlerFuncs struct {
// Fired when the cluster becomes available. // Fired when the cluster becomes available.
ClusterAvailable func(*federation_api.Cluster) ClusterAvailable func(*federationapi.Cluster)
// Fired when the cluster becomes unavailable. The second arg contains data that was present // Fired when the cluster becomes unavailable. The second arg contains data that was present
// in the cluster before deletion. // in the cluster before deletion.
ClusterUnavailable func(*federation_api.Cluster, []interface{}) ClusterUnavailable func(*federationapi.Cluster, []interface{})
} }
// Builds a FederatedInformer for the given federation client and factory. // Builds a FederatedInformer for the given federation client and factory.
@@ -138,7 +138,7 @@ func NewFederatedInformer(
federatedInformer := &federatedInformerImpl{ federatedInformer := &federatedInformerImpl{
targetInformerFactory: targetInformerFactory, targetInformerFactory: targetInformerFactory,
clientFactory: func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) { clientFactory: func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
clusterConfig, err := BuildClusterConfig(cluster) clusterConfig, err := BuildClusterConfig(cluster)
if err == nil && clusterConfig != nil { if err == nil && clusterConfig != nil {
clientset := kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, userAgentName)) clientset := kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, userAgentName))
@@ -160,18 +160,18 @@ func NewFederatedInformer(
federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = cache.NewInformer( federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return federationClient.Federation().Clusters().List(options) return federationClient.Federation().Clusters().List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return federationClient.Federation().Clusters().Watch(options) return federationClient.Federation().Clusters().Watch(options)
}, },
}, },
&federation_api.Cluster{}, &federationapi.Cluster{},
clusterSyncPeriod, clusterSyncPeriod,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) { DeleteFunc: func(old interface{}) {
oldCluster, ok := old.(*federation_api.Cluster) oldCluster, ok := old.(*federationapi.Cluster)
if ok { if ok {
var data []interface{} var data []interface{}
if clusterLifecycle.ClusterUnavailable != nil { if clusterLifecycle.ClusterUnavailable != nil {
@@ -184,7 +184,7 @@ func NewFederatedInformer(
} }
}, },
AddFunc: func(cur interface{}) { AddFunc: func(cur interface{}) {
curCluster, ok := cur.(*federation_api.Cluster) curCluster, ok := cur.(*federationapi.Cluster)
if ok && isClusterReady(curCluster) { if ok && isClusterReady(curCluster) {
federatedInformer.addCluster(curCluster) federatedInformer.addCluster(curCluster)
if clusterLifecycle.ClusterAvailable != nil { if clusterLifecycle.ClusterAvailable != nil {
@@ -195,12 +195,12 @@ func NewFederatedInformer(
} }
}, },
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
oldCluster, ok := old.(*federation_api.Cluster) oldCluster, ok := old.(*federationapi.Cluster)
if !ok { if !ok {
glog.Errorf("Internal error: Cluster %v not updated. Old cluster not of correct type.", old) glog.Errorf("Internal error: Cluster %v not updated. Old cluster not of correct type.", old)
return return
} }
curCluster, ok := cur.(*federation_api.Cluster) curCluster, ok := cur.(*federationapi.Cluster)
if !ok { if !ok {
glog.Errorf("Internal error: Cluster %v not updated. New cluster not of correct type.", cur) glog.Errorf("Internal error: Cluster %v not updated. New cluster not of correct type.", cur)
return return
@@ -230,10 +230,10 @@ func NewFederatedInformer(
return federatedInformer return federatedInformer
} }
func isClusterReady(cluster *federation_api.Cluster) bool { func isClusterReady(cluster *federationapi.Cluster) bool {
for _, condition := range cluster.Status.Conditions { for _, condition := range cluster.Status.Conditions {
if condition.Type == federation_api.ClusterReady { if condition.Type == federationapi.ClusterReady {
if condition.Status == api_v1.ConditionTrue { if condition.Status == apiv1.ConditionTrue {
return true return true
} }
} }
@@ -260,7 +260,7 @@ type federatedInformerImpl struct {
targetInformers map[string]informer targetInformers map[string]informer
// A function to build clients. // A function to build clients.
clientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error) clientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)
} }
// *federatedInformerImpl implements FederatedInformer interface. // *federatedInformerImpl implements FederatedInformer interface.
@@ -291,7 +291,7 @@ func (f *federatedInformerImpl) Start() {
go f.clusterInformer.controller.Run(f.clusterInformer.stopChan) go f.clusterInformer.controller.Run(f.clusterInformer.stopChan)
} }
func (f *federatedInformerImpl) SetClientFactory(clientFactory func(*federation_api.Cluster) (kubeclientset.Interface, error)) { func (f *federatedInformerImpl) SetClientFactory(clientFactory func(*federationapi.Cluster) (kubeclientset.Interface, error)) {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
@@ -319,14 +319,14 @@ func (f *federatedInformerImpl) getClientsetForClusterUnlocked(clusterName strin
return nil, fmt.Errorf("cluster %q not found", clusterName) return nil, fmt.Errorf("cluster %q not found", clusterName)
} }
func (f *federatedInformerImpl) GetUnreadyClusters() ([]*federation_api.Cluster, error) { func (f *federatedInformerImpl) GetUnreadyClusters() ([]*federationapi.Cluster, error) {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
items := f.clusterInformer.store.List() items := f.clusterInformer.store.List()
result := make([]*federation_api.Cluster, 0, len(items)) result := make([]*federationapi.Cluster, 0, len(items))
for _, item := range items { for _, item := range items {
if cluster, ok := item.(*federation_api.Cluster); ok { if cluster, ok := item.(*federationapi.Cluster); ok {
if !isClusterReady(cluster) { if !isClusterReady(cluster) {
result = append(result, cluster) result = append(result, cluster)
} }
@@ -338,14 +338,14 @@ func (f *federatedInformerImpl) GetUnreadyClusters() ([]*federation_api.Cluster,
} }
// GetReadyClusers returns all clusters for which the sub-informers are run. // GetReadyClusers returns all clusters for which the sub-informers are run.
func (f *federatedInformerImpl) GetReadyClusters() ([]*federation_api.Cluster, error) { func (f *federatedInformerImpl) GetReadyClusters() ([]*federationapi.Cluster, error) {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
items := f.clusterInformer.store.List() items := f.clusterInformer.store.List()
result := make([]*federation_api.Cluster, 0, len(items)) result := make([]*federationapi.Cluster, 0, len(items))
for _, item := range items { for _, item := range items {
if cluster, ok := item.(*federation_api.Cluster); ok { if cluster, ok := item.(*federationapi.Cluster); ok {
if isClusterReady(cluster) { if isClusterReady(cluster) {
result = append(result, cluster) result = append(result, cluster)
} }
@@ -357,15 +357,15 @@ func (f *federatedInformerImpl) GetReadyClusters() ([]*federation_api.Cluster, e
} }
// GetCluster returns the cluster with the given name, if found. // GetCluster returns the cluster with the given name, if found.
func (f *federatedInformerImpl) GetReadyCluster(name string) (*federation_api.Cluster, bool, error) { func (f *federatedInformerImpl) GetReadyCluster(name string) (*federationapi.Cluster, bool, error) {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
return f.getReadyClusterUnlocked(name) return f.getReadyClusterUnlocked(name)
} }
func (f *federatedInformerImpl) getReadyClusterUnlocked(name string) (*federation_api.Cluster, bool, error) { func (f *federatedInformerImpl) getReadyClusterUnlocked(name string) (*federationapi.Cluster, bool, error) {
if obj, exist, err := f.clusterInformer.store.GetByKey(name); exist && err == nil { if obj, exist, err := f.clusterInformer.store.GetByKey(name); exist && err == nil {
if cluster, ok := obj.(*federation_api.Cluster); ok { if cluster, ok := obj.(*federationapi.Cluster); ok {
if isClusterReady(cluster) { if isClusterReady(cluster) {
return cluster, true, nil return cluster, true, nil
} }
@@ -385,7 +385,7 @@ func (f *federatedInformerImpl) ClustersSynced() bool {
} }
// Adds the given cluster to federated informer. // Adds the given cluster to federated informer.
func (f *federatedInformerImpl) addCluster(cluster *federation_api.Cluster) { func (f *federatedInformerImpl) addCluster(cluster *federationapi.Cluster) {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
name := cluster.Name name := cluster.Name
@@ -405,7 +405,7 @@ func (f *federatedInformerImpl) addCluster(cluster *federation_api.Cluster) {
} }
// Removes the cluster from federated informer. // Removes the cluster from federated informer.
func (f *federatedInformerImpl) deleteCluster(cluster *federation_api.Cluster) { func (f *federatedInformerImpl) deleteCluster(cluster *federationapi.Cluster) {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
name := cluster.Name name := cluster.Name
@@ -486,7 +486,7 @@ func (fs *federatedStoreImpl) GetKeyFor(item interface{}) string {
// Checks whether stores for all clusters form the lists (and only these) are there and // Checks whether stores for all clusters form the lists (and only these) are there and
// are synced. // are synced.
func (fs *federatedStoreImpl) ClustersSynced(clusters []*federation_api.Cluster) bool { func (fs *federatedStoreImpl) ClustersSynced(clusters []*federationapi.Cluster) bool {
// Get the list of informers to check under a lock and check it outside. // Get the list of informers to check under a lock and check it outside.
okSoFar, informersToCheck := func() (bool, []informer) { okSoFar, informersToCheck := func() (bool, []informer) {

View File

@@ -20,12 +20,12 @@ import (
"testing" "testing"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fakefederationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake" fakefederationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/fake"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -39,18 +39,18 @@ func TestFederatedInformer(t *testing.T) {
fakeFederationClient := &fakefederationclientset.Clientset{} fakeFederationClient := &fakefederationclientset.Clientset{}
// Add a single cluster to federation and remove it when needed. // Add a single cluster to federation and remove it when needed.
cluster := federation_api.Cluster{ cluster := federationapi.Cluster{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: "mycluster", Name: "mycluster",
}, },
Status: federation_api.ClusterStatus{ Status: federationapi.ClusterStatus{
Conditions: []federation_api.ClusterCondition{ Conditions: []federationapi.ClusterCondition{
{Type: federation_api.ClusterReady, Status: api_v1.ConditionTrue}, {Type: federationapi.ClusterReady, Status: apiv1.ConditionTrue},
}, },
}, },
} }
fakeFederationClient.AddReactor("list", "clusters", func(action core.Action) (bool, runtime.Object, error) { fakeFederationClient.AddReactor("list", "clusters", func(action core.Action) (bool, runtime.Object, error) {
return true, &federation_api.ClusterList{Items: []federation_api.Cluster{cluster}}, nil return true, &federationapi.ClusterList{Items: []federationapi.Cluster{cluster}}, nil
}) })
deleteChan := make(chan struct{}) deleteChan := make(chan struct{})
fakeFederationClient.AddWatchReactor("clusters", func(action core.Action) (bool, watch.Interface, error) { fakeFederationClient.AddWatchReactor("clusters", func(action core.Action) (bool, watch.Interface, error) {
@@ -62,32 +62,32 @@ func TestFederatedInformer(t *testing.T) {
return true, fakeWatch, nil return true, fakeWatch, nil
}) })
fakeKubeClient := &fake_kubeclientset.Clientset{} fakeKubeClient := &fakekubeclientset.Clientset{}
// There is a single service ns1/s1 in cluster mycluster. // There is a single service ns1/s1 in cluster mycluster.
service := api_v1.Service{ service := apiv1.Service{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1", Namespace: "ns1",
Name: "s1", Name: "s1",
}, },
} }
fakeKubeClient.AddReactor("list", "services", func(action core.Action) (bool, runtime.Object, error) { fakeKubeClient.AddReactor("list", "services", func(action core.Action) (bool, runtime.Object, error) {
return true, &api_v1.ServiceList{Items: []api_v1.Service{service}}, nil return true, &apiv1.ServiceList{Items: []apiv1.Service{service}}, nil
}) })
fakeKubeClient.AddWatchReactor("services", func(action core.Action) (bool, watch.Interface, error) { fakeKubeClient.AddWatchReactor("services", func(action core.Action) (bool, watch.Interface, error) {
return true, watch.NewFake(), nil return true, watch.NewFake(), nil
}) })
targetInformerFactory := func(cluster *federation_api.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) { targetInformerFactory := func(cluster *federationapi.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (runtime.Object, error) { ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
return clientset.Core().Services(api_v1.NamespaceAll).List(options) return clientset.Core().Services(apiv1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return clientset.Core().Services(api_v1.NamespaceAll).Watch(options) return clientset.Core().Services(apiv1.NamespaceAll).Watch(options)
}, },
}, },
&api_v1.Service{}, &apiv1.Service{},
10*time.Second, 10*time.Second,
cache.ResourceEventHandlerFuncs{}) cache.ResourceEventHandlerFuncs{})
} }
@@ -95,25 +95,25 @@ func TestFederatedInformer(t *testing.T) {
addedClusters := make(chan string, 1) addedClusters := make(chan string, 1)
deletedClusters := make(chan string, 1) deletedClusters := make(chan string, 1)
lifecycle := ClusterLifecycleHandlerFuncs{ lifecycle := ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) { ClusterAvailable: func(cluster *federationapi.Cluster) {
addedClusters <- cluster.Name addedClusters <- cluster.Name
close(addedClusters) close(addedClusters)
}, },
ClusterUnavailable: func(cluster *federation_api.Cluster, _ []interface{}) { ClusterUnavailable: func(cluster *federationapi.Cluster, _ []interface{}) {
deletedClusters <- cluster.Name deletedClusters <- cluster.Name
close(deletedClusters) close(deletedClusters)
}, },
} }
informer := NewFederatedInformer(fakeFederationClient, targetInformerFactory, &lifecycle).(*federatedInformerImpl) informer := NewFederatedInformer(fakeFederationClient, targetInformerFactory, &lifecycle).(*federatedInformerImpl)
informer.clientFactory = func(cluster *federation_api.Cluster) (kubeclientset.Interface, error) { informer.clientFactory = func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
return fakeKubeClient, nil return fakeKubeClient, nil
} }
assert.NotNil(t, informer) assert.NotNil(t, informer)
informer.Start() informer.Start()
// Wait until mycluster is synced. // Wait until mycluster is synced.
for !informer.GetTargetStore().ClustersSynced([]*federation_api.Cluster{&cluster}) { for !informer.GetTargetStore().ClustersSynced([]*federationapi.Cluster{&cluster}) {
time.Sleep(time.Millisecond * 100) time.Sleep(time.Millisecond * 100)
} }
readyClusters, err := informer.GetReadyClusters() readyClusters, err := informer.GetReadyClusters()
@@ -131,7 +131,7 @@ func TestFederatedInformer(t *testing.T) {
// All checked, lets delete the cluster. // All checked, lets delete the cluster.
deleteChan <- struct{}{} deleteChan <- struct{}{}
for !informer.GetTargetStore().ClustersSynced([]*federation_api.Cluster{}) { for !informer.GetTargetStore().ClustersSynced([]*federationapi.Cluster{}) {
time.Sleep(time.Millisecond * 100) time.Sleep(time.Millisecond * 100)
} }
readyClusters, err = informer.GetReadyClusters() readyClusters, err = informer.GetReadyClusters()

View File

@@ -21,7 +21,7 @@ import (
"time" "time"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
) )
// Type of the operation that can be executed in Federated. // Type of the operation that can be executed in Federated.
@@ -37,7 +37,7 @@ const (
type FederatedOperation struct { type FederatedOperation struct {
Type FederatedOperationType Type FederatedOperationType
ClusterName string ClusterName string
Obj pkg_runtime.Object Obj pkgruntime.Object
} }
// A helper that executes the given set of updates on federation, in parallel. // A helper that executes the given set of updates on federation, in parallel.
@@ -52,7 +52,7 @@ type FederatedUpdater interface {
} }
// A function that executes some operation using the passed client and object. // A function that executes some operation using the passed client and object.
type FederatedOperationHandler func(kubeclientset.Interface, pkg_runtime.Object) error type FederatedOperationHandler func(kubeclientset.Interface, pkgruntime.Object) error
type federatedUpdaterImpl struct { type federatedUpdaterImpl struct {
federation FederationView federation FederationView

View File

@@ -21,11 +21,11 @@ import (
"testing" "testing"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
fake_kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakekubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -38,18 +38,18 @@ type fakeFederationView struct {
var _ FederationView = &fakeFederationView{} var _ FederationView = &fakeFederationView{}
func (f *fakeFederationView) GetClientsetForCluster(clusterName string) (kubeclientset.Interface, error) { func (f *fakeFederationView) GetClientsetForCluster(clusterName string) (kubeclientset.Interface, error) {
return &fake_kubeclientset.Clientset{}, nil return &fakekubeclientset.Clientset{}, nil
} }
func (f *fakeFederationView) GetReadyClusters() ([]*federation_api.Cluster, error) { func (f *fakeFederationView) GetReadyClusters() ([]*federationapi.Cluster, error) {
return []*federation_api.Cluster{}, nil return []*federationapi.Cluster{}, nil
} }
func (f *fakeFederationView) GetUnreadyClusters() ([]*federation_api.Cluster, error) { func (f *fakeFederationView) GetUnreadyClusters() ([]*federationapi.Cluster, error) {
return []*federation_api.Cluster{}, nil return []*federationapi.Cluster{}, nil
} }
func (f *fakeFederationView) GetReadyCluster(name string) (*federation_api.Cluster, bool, error) { func (f *fakeFederationView) GetReadyCluster(name string) (*federationapi.Cluster, bool, error) {
return nil, false, nil return nil, false, nil
} }
@@ -62,13 +62,13 @@ func TestFederatedUpdaterOK(t *testing.T) {
updateChan := make(chan string, 5) updateChan := make(chan string, 5)
updater := NewFederatedUpdater(&fakeFederationView{}, updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error { func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
service := obj.(*api_v1.Service) service := obj.(*apiv1.Service)
addChan <- service.Name addChan <- service.Name
return nil return nil
}, },
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error { func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
service := obj.(*api_v1.Service) service := obj.(*apiv1.Service)
updateChan <- service.Name updateChan <- service.Name
return nil return nil
}, },
@@ -93,7 +93,7 @@ func TestFederatedUpdaterOK(t *testing.T) {
func TestFederatedUpdaterError(t *testing.T) { func TestFederatedUpdaterError(t *testing.T) {
updater := NewFederatedUpdater(&fakeFederationView{}, updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error { func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
return fmt.Errorf("boom") return fmt.Errorf("boom")
}, noop, noop) }, noop, noop)
@@ -113,7 +113,7 @@ func TestFederatedUpdaterError(t *testing.T) {
func TestFederatedUpdaterTimeout(t *testing.T) { func TestFederatedUpdaterTimeout(t *testing.T) {
start := time.Now() start := time.Now()
updater := NewFederatedUpdater(&fakeFederationView{}, updater := NewFederatedUpdater(&fakeFederationView{},
func(_ kubeclientset.Interface, obj pkg_runtime.Object) error { func(_ kubeclientset.Interface, obj pkgruntime.Object) error {
time.Sleep(time.Minute) time.Sleep(time.Minute)
return nil return nil
}, },
@@ -134,15 +134,15 @@ func TestFederatedUpdaterTimeout(t *testing.T) {
assert.True(t, start.Add(10*time.Second).After(end)) assert.True(t, start.Add(10*time.Second).After(end))
} }
func makeService(cluster, name string) *api_v1.Service { func makeService(cluster, name string) *apiv1.Service {
return &api_v1.Service{ return &apiv1.Service{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1", Namespace: "ns1",
Name: name, Name: name,
}, },
} }
} }
func noop(_ kubeclientset.Interface, _ pkg_runtime.Object) error { func noop(_ kubeclientset.Interface, _ pkgruntime.Object) error {
return nil return nil
} }

View File

@@ -20,25 +20,25 @@ import (
"fmt" "fmt"
"reflect" "reflect"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
) )
// Returns cache.ResourceEventHandlerFuncs that trigger the given function // Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on all object changes. // on all object changes.
func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs { func NewTriggerOnAllChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
return &cache.ResourceEventHandlerFuncs{ return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) { DeleteFunc: func(old interface{}) {
oldObj := old.(pkg_runtime.Object) oldObj := old.(pkgruntime.Object)
triggerFunc(oldObj) triggerFunc(oldObj)
}, },
AddFunc: func(cur interface{}) { AddFunc: func(cur interface{}) {
curObj := cur.(pkg_runtime.Object) curObj := cur.(pkgruntime.Object)
triggerFunc(curObj) triggerFunc(curObj)
}, },
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
curObj := cur.(pkg_runtime.Object) curObj := cur.(pkgruntime.Object)
if !reflect.DeepEqual(old, cur) { if !reflect.DeepEqual(old, cur) {
triggerFunc(curObj) triggerFunc(curObj)
} }
@@ -48,7 +48,7 @@ func NewTriggerOnAllChanges(triggerFunc func(pkg_runtime.Object)) *cache.Resourc
// Returns cache.ResourceEventHandlerFuncs that trigger the given function // Returns cache.ResourceEventHandlerFuncs that trigger the given function
// on object add and delete as well as spec/object meta on update. // on object add and delete as well as spec/object meta on update.
func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *cache.ResourceEventHandlerFuncs { func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkgruntime.Object)) *cache.ResourceEventHandlerFuncs {
getFieldOrPanic := func(obj interface{}, fieldName string) interface{} { getFieldOrPanic := func(obj interface{}, fieldName string) interface{} {
val := reflect.ValueOf(obj).Elem().FieldByName(fieldName) val := reflect.ValueOf(obj).Elem().FieldByName(fieldName)
if val.IsValid() { if val.IsValid() {
@@ -59,17 +59,17 @@ func NewTriggerOnMetaAndSpecChanges(triggerFunc func(pkg_runtime.Object)) *cache
} }
return &cache.ResourceEventHandlerFuncs{ return &cache.ResourceEventHandlerFuncs{
DeleteFunc: func(old interface{}) { DeleteFunc: func(old interface{}) {
oldObj := old.(pkg_runtime.Object) oldObj := old.(pkgruntime.Object)
triggerFunc(oldObj) triggerFunc(oldObj)
}, },
AddFunc: func(cur interface{}) { AddFunc: func(cur interface{}) {
curObj := cur.(pkg_runtime.Object) curObj := cur.(pkgruntime.Object)
triggerFunc(curObj) triggerFunc(curObj)
}, },
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
curObj := cur.(pkg_runtime.Object) curObj := cur.(pkgruntime.Object)
oldMeta := getFieldOrPanic(old, "ObjectMeta").(api_v1.ObjectMeta) oldMeta := getFieldOrPanic(old, "ObjectMeta").(apiv1.ObjectMeta)
curMeta := getFieldOrPanic(cur, "ObjectMeta").(api_v1.ObjectMeta) curMeta := getFieldOrPanic(cur, "ObjectMeta").(apiv1.ObjectMeta)
if !ObjectMetaEquivalent(oldMeta, curMeta) || if !ObjectMetaEquivalent(oldMeta, curMeta) ||
!reflect.DeepEqual(getFieldOrPanic(old, "Spec"), getFieldOrPanic(cur, "Spec")) { !reflect.DeepEqual(getFieldOrPanic(old, "Spec"), getFieldOrPanic(cur, "Spec")) {
triggerFunc(curObj) triggerFunc(curObj)

View File

@@ -19,22 +19,22 @@ package util
import ( import (
"testing" "testing"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestHandlers(t *testing.T) { func TestHandlers(t *testing.T) {
// There is a single service ns1/s1 in cluster mycluster. // There is a single service ns1/s1 in cluster mycluster.
service := api_v1.Service{ service := apiv1.Service{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1", Namespace: "ns1",
Name: "s1", Name: "s1",
}, },
} }
service2 := api_v1.Service{ service2 := apiv1.Service{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1", Namespace: "ns1",
Name: "s1", Name: "s1",
Annotations: map[string]string{ Annotations: map[string]string{
@@ -53,7 +53,7 @@ func TestHandlers(t *testing.T) {
} }
trigger := NewTriggerOnAllChanges( trigger := NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
triggerChan <- struct{}{} triggerChan <- struct{}{}
}) })
@@ -67,7 +67,7 @@ func TestHandlers(t *testing.T) {
assert.True(t, triggered()) assert.True(t, triggered())
trigger2 := NewTriggerOnMetaAndSpecChanges( trigger2 := NewTriggerOnMetaAndSpecChanges(
func(obj pkg_runtime.Object) { func(obj pkgruntime.Object) {
triggerChan <- struct{}{} triggerChan <- struct{}{}
}, },
) )
@@ -81,14 +81,14 @@ func TestHandlers(t *testing.T) {
trigger2.OnUpdate(&service, &service2) trigger2.OnUpdate(&service, &service2)
assert.True(t, triggered()) assert.True(t, triggered())
service3 := api_v1.Service{ service3 := apiv1.Service{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Namespace: "ns1", Namespace: "ns1",
Name: "s1", Name: "s1",
}, },
Status: api_v1.ServiceStatus{ Status: apiv1.ServiceStatus{
LoadBalancer: api_v1.LoadBalancerStatus{ LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: []api_v1.LoadBalancerIngress{{ Ingress: []apiv1.LoadBalancerIngress{{
Hostname: "A", Hostname: "A",
}}, }},
}, },

View File

@@ -20,19 +20,19 @@ import (
"hash/fnv" "hash/fnv"
"sort" "sort"
fed_api "k8s.io/kubernetes/federation/apis/federation" fedapi "k8s.io/kubernetes/federation/apis/federation"
) )
// Planner decides how many out of the given replicas should be placed in each of the // Planner decides how many out of the given replicas should be placed in each of the
// federated clusters. // federated clusters.
type Planner struct { type Planner struct {
preferences *fed_api.FederatedReplicaSetPreferences preferences *fedapi.FederatedReplicaSetPreferences
} }
type namedClusterReplicaSetPreferences struct { type namedClusterReplicaSetPreferences struct {
clusterName string clusterName string
hash uint32 hash uint32
fed_api.ClusterReplicaSetPreferences fedapi.ClusterReplicaSetPreferences
} }
type byWeight []*namedClusterReplicaSetPreferences type byWeight []*namedClusterReplicaSetPreferences
@@ -46,7 +46,7 @@ func (a byWeight) Less(i, j int) bool {
return (a[i].Weight > a[j].Weight) || (a[i].Weight == a[j].Weight && a[i].hash < a[j].hash) return (a[i].Weight > a[j].Weight) || (a[i].Weight == a[j].Weight && a[i].hash < a[j].hash)
} }
func NewPlanner(preferences *fed_api.FederatedReplicaSetPreferences) *Planner { func NewPlanner(preferences *fedapi.FederatedReplicaSetPreferences) *Planner {
return &Planner{ return &Planner{
preferences: preferences, preferences: preferences,
} }
@@ -71,7 +71,7 @@ func (p *Planner) Plan(replicasToDistribute int64, availableClusters []string, c
plan := make(map[string]int64, len(preferences)) plan := make(map[string]int64, len(preferences))
overflow := make(map[string]int64, len(preferences)) overflow := make(map[string]int64, len(preferences))
named := func(name string, pref fed_api.ClusterReplicaSetPreferences) *namedClusterReplicaSetPreferences { named := func(name string, pref fedapi.ClusterReplicaSetPreferences) *namedClusterReplicaSetPreferences {
// Seems to work better than addler for our case. // Seems to work better than addler for our case.
hasher := fnv.New32() hasher := fnv.New32()
hasher.Write([]byte(name)) hasher.Write([]byte(name))

View File

@@ -19,13 +19,13 @@ package planner
import ( import (
"testing" "testing"
fed_api "k8s.io/kubernetes/federation/apis/federation" fedapi "k8s.io/kubernetes/federation/apis/federation"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func doCheck(t *testing.T, pref map[string]fed_api.ClusterReplicaSetPreferences, replicas int64, clusters []string, expected map[string]int64) { func doCheck(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string, expected map[string]int64) {
planer := NewPlanner(&fed_api.FederatedReplicaSetPreferences{ planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Clusters: pref, Clusters: pref,
}) })
plan, overflow := planer.Plan(replicas, clusters, map[string]int64{}, map[string]int64{}, "") plan, overflow := planer.Plan(replicas, clusters, map[string]int64{}, map[string]int64{}, "")
@@ -33,9 +33,9 @@ func doCheck(t *testing.T, pref map[string]fed_api.ClusterReplicaSetPreferences,
assert.Equal(t, 0, len(overflow)) assert.Equal(t, 0, len(overflow))
} }
func doCheckWithExisting(t *testing.T, pref map[string]fed_api.ClusterReplicaSetPreferences, replicas int64, clusters []string, func doCheckWithExisting(t *testing.T, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
existing map[string]int64, expected map[string]int64) { existing map[string]int64, expected map[string]int64) {
planer := NewPlanner(&fed_api.FederatedReplicaSetPreferences{ planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Clusters: pref, Clusters: pref,
}) })
plan, overflow := planer.Plan(replicas, clusters, existing, map[string]int64{}, "") plan, overflow := planer.Plan(replicas, clusters, existing, map[string]int64{}, "")
@@ -43,12 +43,12 @@ func doCheckWithExisting(t *testing.T, pref map[string]fed_api.ClusterReplicaSet
assert.EqualValues(t, expected, plan) assert.EqualValues(t, expected, plan)
} }
func doCheckWithExistingAndCapacity(t *testing.T, rebalance bool, pref map[string]fed_api.ClusterReplicaSetPreferences, replicas int64, clusters []string, func doCheckWithExistingAndCapacity(t *testing.T, rebalance bool, pref map[string]fedapi.ClusterReplicaSetPreferences, replicas int64, clusters []string,
existing map[string]int64, existing map[string]int64,
capacity map[string]int64, capacity map[string]int64,
expected map[string]int64, expected map[string]int64,
expectedOverflow map[string]int64) { expectedOverflow map[string]int64) {
planer := NewPlanner(&fed_api.FederatedReplicaSetPreferences{ planer := NewPlanner(&fedapi.FederatedReplicaSetPreferences{
Rebalance: rebalance, Rebalance: rebalance,
Clusters: pref, Clusters: pref,
}) })
@@ -62,102 +62,102 @@ func pint(val int64) *int64 {
} }
func TestEqual(t *testing.T) { func TestEqual(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
// hash dependent // hash dependent
map[string]int64{"A": 16, "B": 17, "C": 17}) map[string]int64{"A": 16, "B": 17, "C": 17})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B"}, 50, []string{"A", "B"},
map[string]int64{"A": 25, "B": 25}) map[string]int64{"A": 25, "B": 25})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
1, []string{"A", "B"}, 1, []string{"A", "B"},
// hash dependent // hash dependent
map[string]int64{"A": 0, "B": 1}) map[string]int64{"A": 0, "B": 1})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
1, []string{"A", "B", "C", "D"}, 1, []string{"A", "B", "C", "D"},
// hash dependent // hash dependent
map[string]int64{"A": 0, "B": 0, "C": 0, "D": 1}) map[string]int64{"A": 0, "B": 0, "C": 0, "D": 1})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
1, []string{"A"}, 1, []string{"A"},
map[string]int64{"A": 1}) map[string]int64{"A": 1})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
1, []string{}, 1, []string{},
map[string]int64{}) map[string]int64{})
} }
func TestEqualWithExisting(t *testing.T) { func TestEqualWithExisting(t *testing.T) {
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"C": 30}, map[string]int64{"C": 30},
map[string]int64{"A": 10, "B": 10, "C": 30}) map[string]int64{"A": 10, "B": 10, "C": 30})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B"}, 50, []string{"A", "B"},
map[string]int64{"A": 30}, map[string]int64{"A": 30},
map[string]int64{"A": 30, "B": 20}) map[string]int64{"A": 30, "B": 20})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
15, []string{"A", "B"}, 15, []string{"A", "B"},
map[string]int64{"A": 0, "B": 8}, map[string]int64{"A": 0, "B": 8},
map[string]int64{"A": 7, "B": 8}) map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
15, []string{"A", "B"}, 15, []string{"A", "B"},
map[string]int64{"A": 1, "B": 8}, map[string]int64{"A": 1, "B": 8},
map[string]int64{"A": 7, "B": 8}) map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
15, []string{"A", "B"}, 15, []string{"A", "B"},
map[string]int64{"A": 4, "B": 8}, map[string]int64{"A": 4, "B": 8},
map[string]int64{"A": 7, "B": 8}) map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
15, []string{"A", "B"}, 15, []string{"A", "B"},
map[string]int64{"A": 5, "B": 8}, map[string]int64{"A": 5, "B": 8},
map[string]int64{"A": 7, "B": 8}) map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
15, []string{"A", "B"}, 15, []string{"A", "B"},
map[string]int64{"A": 6, "B": 8}, map[string]int64{"A": 6, "B": 8},
map[string]int64{"A": 7, "B": 8}) map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
15, []string{"A", "B"}, 15, []string{"A", "B"},
map[string]int64{"A": 7, "B": 8}, map[string]int64{"A": 7, "B": 8},
map[string]int64{"A": 7, "B": 8}) map[string]int64{"A": 7, "B": 8})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
500000, []string{"A", "B"}, 500000, []string{"A", "B"},
map[string]int64{"A": 300000}, map[string]int64{"A": 300000},
map[string]int64{"A": 300000, "B": 200000}) map[string]int64{"A": 300000, "B": 200000})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B"}, 50, []string{"A", "B"},
map[string]int64{"A": 10}, map[string]int64{"A": 10},
map[string]int64{"A": 25, "B": 25}) map[string]int64{"A": 25, "B": 25})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B"}, 50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 70}, map[string]int64{"A": 10, "B": 70},
@@ -165,13 +165,13 @@ func TestEqualWithExisting(t *testing.T) {
// TODO: Should be 10:40, update algorithm. Issue: #31816 // TODO: Should be 10:40, update algorithm. Issue: #31816
map[string]int64{"A": 0, "B": 50}) map[string]int64{"A": 0, "B": 50})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
1, []string{"A", "B"}, 1, []string{"A", "B"},
map[string]int64{"A": 30}, map[string]int64{"A": 30},
map[string]int64{"A": 1, "B": 0}) map[string]int64{"A": 1, "B": 0})
doCheckWithExisting(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExisting(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B"}, 50, []string{"A", "B"},
map[string]int64{"A": 10, "B": 20}, map[string]int64{"A": 10, "B": 20},
@@ -180,7 +180,7 @@ func TestEqualWithExisting(t *testing.T) {
func TestWithExistingAndCapacity(t *testing.T) { func TestWithExistingAndCapacity(t *testing.T) {
// desired without capacity: map[string]int64{"A": 17, "B": 17, "C": 16}) // desired without capacity: map[string]int64{"A": 17, "B": 17, "C": 16})
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1}}, "*": {Weight: 1}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{}, map[string]int64{},
@@ -189,7 +189,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"C": 7}) map[string]int64{"C": 7})
// desired B:50 C:0 // desired B:50 C:0
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000}, "A": {Weight: 10000},
"B": {Weight: 1}}, "B": {Weight: 1}},
50, []string{"B", "C"}, 50, []string{"B", "C"},
@@ -200,7 +200,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
) )
// desired A:20 B:40 // desired A:20 B:40
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1}, "A": {Weight: 1},
"B": {Weight: 2}}, "B": {Weight: 2}},
60, []string{"A", "B", "C"}, 60, []string{"A", "B", "C"},
@@ -210,7 +210,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"B": 30}) map[string]int64{"B": 30})
// map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10}) // map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10})
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)}, "A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1}, "B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)}, "C": {Weight: 1, MaxReplicas: pint(21)},
@@ -223,7 +223,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
) )
// desired A:20 B:20 // desired A:20 B:20
doCheckWithExistingAndCapacity(t, false, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1}, "A": {Weight: 1},
"B": {Weight: 1}}, "B": {Weight: 1}},
60, []string{"A", "B", "C"}, 60, []string{"A", "B", "C"},
@@ -233,7 +233,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"A": 20, "B": 20}) map[string]int64{"A": 20, "B": 20})
// desired A:10 B:50 although A:50 B:10 is fully acceptable because rebalance = false // desired A:10 B:50 although A:50 B:10 is fully acceptable because rebalance = false
doCheckWithExistingAndCapacity(t, false, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1}, "A": {Weight: 1},
"B": {Weight: 5}}, "B": {Weight: 5}},
60, []string{"A", "B", "C"}, 60, []string{"A", "B", "C"},
@@ -242,7 +242,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{"A": 50, "B": 10, "C": 0}, map[string]int64{"A": 50, "B": 10, "C": 0},
map[string]int64{}) map[string]int64{})
doCheckWithExistingAndCapacity(t, false, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, false, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0}}, "*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{}, map[string]int64{},
@@ -251,7 +251,7 @@ func TestWithExistingAndCapacity(t *testing.T) {
map[string]int64{}) map[string]int64{})
// Actually we would like to have extra 20 in B but 15 is also good. // Actually we would like to have extra 20 in B but 15 is also good.
doCheckWithExistingAndCapacity(t, true, map[string]fed_api.ClusterReplicaSetPreferences{ doCheckWithExistingAndCapacity(t, true, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 1}}, "*": {MinReplicas: 20, Weight: 1}},
60, []string{"A", "B"}, 60, []string{"A", "B"},
map[string]int64{}, map[string]int64{},
@@ -261,75 +261,75 @@ func TestWithExistingAndCapacity(t *testing.T) {
} }
func TestMin(t *testing.T) { func TestMin(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 2, Weight: 0}}, "*": {MinReplicas: 2, Weight: 0}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2}) map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0}}, "*": {MinReplicas: 20, Weight: 0}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
// hash dependent. // hash dependent.
map[string]int64{"A": 10, "B": 20, "C": 20}) map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 20, Weight: 0}, "*": {MinReplicas: 20, Weight: 0},
"A": {MinReplicas: 100, Weight: 1}}, "A": {MinReplicas: 100, Weight: 1}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0}) map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {MinReplicas: 10, Weight: 1, MaxReplicas: pint(12)}}, "*": {MinReplicas: 10, Weight: 1, MaxReplicas: pint(12)}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 12, "B": 12, "C": 12}) map[string]int64{"A": 12, "B": 12, "C": 12})
} }
func TestMax(t *testing.T) { func TestMax(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 1, MaxReplicas: pint(2)}}, "*": {Weight: 1, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 2, "B": 2, "C": 2}) map[string]int64{"A": 2, "B": 2, "C": 2})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"*": {Weight: 0, MaxReplicas: pint(2)}}, "*": {Weight: 0, MaxReplicas: pint(2)}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 0, "B": 0, "C": 0}) map[string]int64{"A": 0, "B": 0, "C": 0})
} }
func TestWeight(t *testing.T) { func TestWeight(t *testing.T) {
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 1}, "A": {Weight: 1},
"B": {Weight: 2}}, "B": {Weight: 2}},
60, []string{"A", "B", "C"}, 60, []string{"A", "B", "C"},
map[string]int64{"A": 20, "B": 40, "C": 0}) map[string]int64{"A": 20, "B": 40, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000}, "A": {Weight: 10000},
"B": {Weight: 1}}, "B": {Weight: 1}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 50, "B": 0, "C": 0}) map[string]int64{"A": 50, "B": 0, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000}, "A": {Weight: 10000},
"B": {Weight: 1}}, "B": {Weight: 1}},
50, []string{"B", "C"}, 50, []string{"B", "C"},
map[string]int64{"B": 50, "C": 0}) map[string]int64{"B": 50, "C": 0})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)}, "A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1}, "B": {Weight: 1},
"C": {Weight: 1}}, "C": {Weight: 1}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 20, "C": 20}) map[string]int64{"A": 10, "B": 20, "C": 20})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)}, "A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1}, "B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(10)}}, "C": {Weight: 1, MaxReplicas: pint(10)}},
50, []string{"A", "B", "C"}, 50, []string{"A", "B", "C"},
map[string]int64{"A": 10, "B": 30, "C": 10}) map[string]int64{"A": 10, "B": 30, "C": 10})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)}, "A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1}, "B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)}, "C": {Weight: 1, MaxReplicas: pint(21)},
@@ -337,7 +337,7 @@ func TestWeight(t *testing.T) {
71, []string{"A", "B", "C", "D"}, 71, []string{"A", "B", "C", "D"},
map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10}) map[string]int64{"A": 10, "B": 30, "C": 21, "D": 10})
doCheck(t, map[string]fed_api.ClusterReplicaSetPreferences{ doCheck(t, map[string]fedapi.ClusterReplicaSetPreferences{
"A": {Weight: 10000, MaxReplicas: pint(10)}, "A": {Weight: 10000, MaxReplicas: pint(10)},
"B": {Weight: 1}, "B": {Weight: 1},
"C": {Weight: 1, MaxReplicas: pint(21)}, "C": {Weight: 1, MaxReplicas: pint(21)},

View File

@@ -24,10 +24,10 @@ import (
"sync" "sync"
"time" "time"
federation_api "k8s.io/kubernetes/federation/apis/federation/v1beta1" federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
api_v1 "k8s.io/kubernetes/pkg/api/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -250,7 +250,7 @@ func CheckObjectFromChan(c chan runtime.Object, checkFunction CheckingFunction)
} }
// CompareObjectMeta returns an error when the given objects are not equivalent. // CompareObjectMeta returns an error when the given objects are not equivalent.
func CompareObjectMeta(a, b api_v1.ObjectMeta) error { func CompareObjectMeta(a, b apiv1.ObjectMeta) error {
if a.Namespace != b.Namespace { if a.Namespace != b.Namespace {
return fmt.Errorf("Different namespace expected:%s observed:%s", a.Namespace, b.Namespace) return fmt.Errorf("Different namespace expected:%s observed:%s", a.Namespace, b.Namespace)
} }
@@ -272,15 +272,15 @@ func ToFederatedInformerForTestOnly(informer util.FederatedInformer) util.Federa
} }
// NewCluster builds a new cluster object. // NewCluster builds a new cluster object.
func NewCluster(name string, readyStatus api_v1.ConditionStatus) *federation_api.Cluster { func NewCluster(name string, readyStatus apiv1.ConditionStatus) *federationapi.Cluster {
return &federation_api.Cluster{ return &federationapi.Cluster{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: apiv1.ObjectMeta{
Name: name, Name: name,
Annotations: map[string]string{}, Annotations: map[string]string{},
}, },
Status: federation_api.ClusterStatus{ Status: federationapi.ClusterStatus{
Conditions: []federation_api.ClusterCondition{ Conditions: []federationapi.ClusterCondition{
{Type: federation_api.ClusterReady, Status: readyStatus}, {Type: federationapi.ClusterReady, Status: readyStatus},
}, },
}, },
} }

34
hack/verify-pkg-names.sh Executable file
View File

@@ -0,0 +1,34 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verify whether code follows golang conventions.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::verify_go_version
cd "${KUBE_ROOT}"
if git --no-pager grep -E $'^(import |\t)[a-z]+[A-Z_][a-zA-Z]* "[^"]+"$' -- '**/*.go' ':(exclude)vendor/*' ':(exclude)staging/*'; then
echo "!!! Some package aliases break go conventions."
echo "To fix these errors, do not use capitalized or underscored characters"
echo "in pkg aliases. Refer to https://blog.golang.org/package-names for more info."
exit 1
fi

View File

@@ -62,6 +62,18 @@ else
fi fi
echo "${reset}" echo "${reset}"
echo -ne "Checking for package aliases... "
if ! hack/verify-pkg-names.sh > /dev/null; then
echo "${red}ERROR!"
echo "Some package aliases break go conventions. To fix these errors, "
echo "do not use capitalized or underscored characters in pkg aliases. "
echo "Refer to https://blog.golang.org/package-names for more info."
exit_code=1
else
echo "${green}OK"
fi
echo "${reset}"
echo -ne "Checking for files that need boilerplate... " echo -ne "Checking for files that need boilerplate... "
files=($(git diff --cached --name-only --diff-filter ACM)) files=($(git diff --cached --name-only --diff-filter ACM))
# We always make sure there is one file in the files list. Some tools check # We always make sure there is one file in the files list. Some tools check

View File

@@ -45,7 +45,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
aws_credentials "k8s.io/kubernetes/pkg/credentialprovider/aws" awscredentials "k8s.io/kubernetes/pkg/credentialprovider/aws"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
@@ -172,7 +172,7 @@ const DefaultVolumeType = "gp2"
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits // See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
const DefaultMaxEBSVolumes = 39 const DefaultMaxEBSVolumes = 39
// Used to call aws_credentials.Init() just once // Used to call awscredentials.Init() just once
var once sync.Once var once sync.Once
// Services is an abstraction over AWS, to allow mocking/other implementations // Services is an abstraction over AWS, to allow mocking/other implementations
@@ -720,7 +720,7 @@ func getAvailabilityZone(metadata EC2Metadata) (string, error) {
} }
func isRegionValid(region string) bool { func isRegionValid(region string) bool {
for _, r := range aws_credentials.AWSRegions { for _, r := range awscredentials.AWSRegions {
if r == region { if r == region {
return true return true
} }
@@ -836,7 +836,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
// Register handler for ECR credentials // Register handler for ECR credentials
once.Do(func() { once.Do(func() {
aws_credentials.Init() awscredentials.Init()
}) })
return awsCloud, nil return awsCloud, nil

View File

@@ -32,11 +32,11 @@ import (
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips" "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
v2_monitors "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" v2monitors "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
v2_pools "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" v2pools "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules"
neutron_ports "github.com/rackspace/gophercloud/openstack/networking/v2/ports" neutronports "github.com/rackspace/gophercloud/openstack/networking/v2/ports"
"github.com/rackspace/gophercloud/pagination" "github.com/rackspace/gophercloud/pagination"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
@@ -80,12 +80,12 @@ func networkExtensions(client *gophercloud.ServiceClient) (map[string]bool, erro
return seen, err return seen, err
} }
func getPortByIP(client *gophercloud.ServiceClient, ipAddress string) (neutron_ports.Port, error) { func getPortByIP(client *gophercloud.ServiceClient, ipAddress string) (neutronports.Port, error) {
var targetPort neutron_ports.Port var targetPort neutronports.Port
var portFound = false var portFound = false
err := neutron_ports.List(client, neutron_ports.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { err := neutronports.List(client, neutronports.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
portList, err := neutron_ports.ExtractPorts(page) portList, err := neutronports.ExtractPorts(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -293,10 +293,10 @@ func getListenerForPort(existingListeners []listeners.Listener, port v1.ServiceP
} }
// Get pool for a listener. A listener always has exactly one pool. // Get pool for a listener. A listener always has exactly one pool.
func getPoolByListenerID(client *gophercloud.ServiceClient, loadbalancerID string, listenerID string) (*v2_pools.Pool, error) { func getPoolByListenerID(client *gophercloud.ServiceClient, loadbalancerID string, listenerID string) (*v2pools.Pool, error) {
listenerPools := make([]v2_pools.Pool, 0, 1) listenerPools := make([]v2pools.Pool, 0, 1)
err := v2_pools.List(client, v2_pools.ListOpts{LoadbalancerID: loadbalancerID}).EachPage(func(page pagination.Page) (bool, error) { err := v2pools.List(client, v2pools.ListOpts{LoadbalancerID: loadbalancerID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2_pools.ExtractPools(page) poolsList, err := v2pools.ExtractPools(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -328,10 +328,10 @@ func getPoolByListenerID(client *gophercloud.ServiceClient, loadbalancerID strin
return &listenerPools[0], nil return &listenerPools[0], nil
} }
func getMembersByPoolID(client *gophercloud.ServiceClient, id string) ([]v2_pools.Member, error) { func getMembersByPoolID(client *gophercloud.ServiceClient, id string) ([]v2pools.Member, error) {
var members []v2_pools.Member var members []v2pools.Member
err := v2_pools.ListAssociateMembers(client, id, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) { err := v2pools.ListAssociateMembers(client, id, v2pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2_pools.ExtractMembers(page) membersList, err := v2pools.ExtractMembers(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -347,10 +347,10 @@ func getMembersByPoolID(client *gophercloud.ServiceClient, id string) ([]v2_pool
} }
// Each pool has exactly one or zero monitors. ListOpts does not seem to filter anything. // Each pool has exactly one or zero monitors. ListOpts does not seem to filter anything.
func getMonitorByPoolID(client *gophercloud.ServiceClient, id string) (*v2_monitors.Monitor, error) { func getMonitorByPoolID(client *gophercloud.ServiceClient, id string) (*v2monitors.Monitor, error) {
var monitorList []v2_monitors.Monitor var monitorList []v2monitors.Monitor
err := v2_monitors.List(client, v2_monitors.ListOpts{PoolID: id}).EachPage(func(page pagination.Page) (bool, error) { err := v2monitors.List(client, v2monitors.ListOpts{PoolID: id}).EachPage(func(page pagination.Page) (bool, error) {
monitorsList, err := v2_monitors.ExtractMonitors(page) monitorsList, err := v2monitors.ExtractMonitors(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -385,7 +385,7 @@ func getMonitorByPoolID(client *gophercloud.ServiceClient, id string) (*v2_monit
} }
// Check if a member exists for node // Check if a member exists for node
func memberExists(members []v2_pools.Member, addr string, port int) bool { func memberExists(members []v2pools.Member, addr string, port int) bool {
for _, member := range members { for _, member := range members {
if member.Address == addr && member.ProtocolPort == port { if member.Address == addr && member.ProtocolPort == port {
return true return true
@@ -407,7 +407,7 @@ func popListener(existingListeners []listeners.Listener, id string) []listeners.
return existingListeners return existingListeners
} }
func popMember(members []v2_pools.Member, addr string, port int) []v2_pools.Member { func popMember(members []v2pools.Member, addr string, port int) []v2pools.Member {
for i, member := range members { for i, member := range members {
if member.Address == addr && member.ProtocolPort == port { if member.Address == addr && member.ProtocolPort == port {
members[i] = members[len(members)-1] members[i] = members[len(members)-1]
@@ -596,12 +596,12 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
} }
affinity := v1.ServiceAffinityNone affinity := v1.ServiceAffinityNone
var persistence *v2_pools.SessionPersistence var persistence *v2pools.SessionPersistence
switch affinity { switch affinity {
case v1.ServiceAffinityNone: case v1.ServiceAffinityNone:
persistence = nil persistence = nil
case v1.ServiceAffinityClientIP: case v1.ServiceAffinityClientIP:
persistence = &v2_pools.SessionPersistence{Type: "SOURCE_IP"} persistence = &v2pools.SessionPersistence{Type: "SOURCE_IP"}
default: default:
return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity) return nil, fmt.Errorf("unsupported load balancer affinity: %v", affinity)
} }
@@ -624,9 +624,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID)
lbmethod := v2_pools.LBMethod(lbaas.opts.LBMethod) lbmethod := v2pools.LBMethod(lbaas.opts.LBMethod)
if lbmethod == "" { if lbmethod == "" {
lbmethod = v2_pools.LBMethodRoundRobin lbmethod = v2pools.LBMethodRoundRobin
} }
oldListeners, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID) oldListeners, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID)
@@ -662,9 +662,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
} }
if pool == nil { if pool == nil {
glog.V(4).Infof("Creating pool for listener %s", listener.ID) glog.V(4).Infof("Creating pool for listener %s", listener.ID)
pool, err = v2_pools.Create(lbaas.network, v2_pools.CreateOpts{ pool, err = v2pools.Create(lbaas.network, v2pools.CreateOpts{
Name: fmt.Sprintf("pool_%s_%d", name, portIndex), Name: fmt.Sprintf("pool_%s_%d", name, portIndex),
Protocol: v2_pools.Protocol(port.Protocol), Protocol: v2pools.Protocol(port.Protocol),
LBMethod: lbmethod, LBMethod: lbmethod,
ListenerID: listener.ID, ListenerID: listener.ID,
Persistence: persistence, Persistence: persistence,
@@ -695,7 +695,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
if !memberExists(members, addr, int(port.NodePort)) { if !memberExists(members, addr, int(port.NodePort)) {
glog.V(4).Infof("Creating member for pool %s", pool.ID) glog.V(4).Infof("Creating member for pool %s", pool.ID)
_, err := v2_pools.CreateAssociateMember(lbaas.network, pool.ID, v2_pools.MemberCreateOpts{ _, err := v2pools.CreateAssociateMember(lbaas.network, pool.ID, v2pools.MemberCreateOpts{
ProtocolPort: int(port.NodePort), ProtocolPort: int(port.NodePort),
Address: addr, Address: addr,
SubnetID: lbaas.opts.SubnetId, SubnetID: lbaas.opts.SubnetId,
@@ -716,7 +716,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
// Delete obsolete members for this pool // Delete obsolete members for this pool
for _, member := range members { for _, member := range members {
glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address)
err := v2_pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr() err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
} }
@@ -726,7 +726,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
monitorID := pool.MonitorID monitorID := pool.MonitorID
if monitorID == "" && lbaas.opts.CreateMonitor { if monitorID == "" && lbaas.opts.CreateMonitor {
glog.V(4).Infof("Creating monitor for pool %s", pool.ID) glog.V(4).Infof("Creating monitor for pool %s", pool.ID)
monitor, err := v2_monitors.Create(lbaas.network, v2_monitors.CreateOpts{ monitor, err := v2monitors.Create(lbaas.network, v2monitors.CreateOpts{
PoolID: pool.ID, PoolID: pool.ID,
Type: string(port.Protocol), Type: string(port.Protocol),
Delay: int(lbaas.opts.MonitorDelay.Duration.Seconds()), Delay: int(lbaas.opts.MonitorDelay.Duration.Seconds()),
@@ -756,7 +756,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
monitorID := pool.MonitorID monitorID := pool.MonitorID
if monitorID != "" { if monitorID != "" {
glog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID) glog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID)
err = v2_monitors.Delete(lbaas.network, monitorID).ExtractErr() err = v2monitors.Delete(lbaas.network, monitorID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err) return nil, fmt.Errorf("Error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err)
} }
@@ -770,7 +770,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
if members != nil { if members != nil {
for _, member := range members { for _, member := range members {
glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address)
err := v2_pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr() err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err)
} }
@@ -779,7 +779,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
} }
glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID) glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID)
// delete pool // delete pool
err = v2_pools.Delete(lbaas.network, pool.ID).ExtractErr() err = v2pools.Delete(lbaas.network, pool.ID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return nil, fmt.Errorf("Error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err) return nil, fmt.Errorf("Error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err)
} }
@@ -923,9 +923,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv
return nil, err return nil, err
} }
update_opts := neutron_ports.UpdateOpts{SecurityGroups: []string{lbSecGroup.ID}} update_opts := neutronports.UpdateOpts{SecurityGroups: []string{lbSecGroup.ID}}
res := neutron_ports.Update(lbaas.network, port.ID, update_opts) res := neutronports.Update(lbaas.network, port.ID, update_opts)
if res.Err != nil { if res.Err != nil {
glog.Errorf("Error occured updating port: %s", port.ID) glog.Errorf("Error occured updating port: %s", port.ID)
@@ -986,9 +986,9 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
} }
// Get all pools for this loadbalancer, by listener ID. // Get all pools for this loadbalancer, by listener ID.
lbPools := make(map[string]v2_pools.Pool) lbPools := make(map[string]v2pools.Pool)
err = v2_pools.List(lbaas.network, v2_pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) { err = v2pools.List(lbaas.network, v2pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2_pools.ExtractPools(page) poolsList, err := v2pools.ExtractPools(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1038,9 +1038,9 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
} }
// Find existing pool members (by address) for this port // Find existing pool members (by address) for this port
members := make(map[string]v2_pools.Member) members := make(map[string]v2pools.Member)
err := v2_pools.ListAssociateMembers(lbaas.network, pool.ID, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) { err := v2pools.ListAssociateMembers(lbaas.network, pool.ID, v2pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2_pools.ExtractMembers(page) membersList, err := v2pools.ExtractMembers(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1059,7 +1059,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
// Already exists, do not create member // Already exists, do not create member
continue continue
} }
_, err := v2_pools.CreateAssociateMember(lbaas.network, pool.ID, v2_pools.MemberCreateOpts{ _, err := v2pools.CreateAssociateMember(lbaas.network, pool.ID, v2pools.MemberCreateOpts{
Address: addr, Address: addr,
ProtocolPort: int(port.NodePort), ProtocolPort: int(port.NodePort),
SubnetID: lbaas.opts.SubnetId, SubnetID: lbaas.opts.SubnetId,
@@ -1076,7 +1076,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
// Still present, do not delete member // Still present, do not delete member
continue continue
} }
err = v2_pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr() err = v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return err return err
} }
@@ -1137,8 +1137,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
// get all pools (and health monitors) associated with this loadbalancer // get all pools (and health monitors) associated with this loadbalancer
var poolIDs []string var poolIDs []string
var monitorIDs []string var monitorIDs []string
err = v2_pools.List(lbaas.network, v2_pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) { err = v2pools.List(lbaas.network, v2pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2_pools.ExtractPools(page) poolsList, err := v2pools.ExtractPools(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1157,8 +1157,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
// get all members associated with each poolIDs // get all members associated with each poolIDs
var memberIDs []string var memberIDs []string
for _, poolID := range poolIDs { for _, poolID := range poolIDs {
err := v2_pools.ListAssociateMembers(lbaas.network, poolID, v2_pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) { err := v2pools.ListAssociateMembers(lbaas.network, poolID, v2pools.MemberListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2_pools.ExtractMembers(page) membersList, err := v2pools.ExtractMembers(page)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1176,7 +1176,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
// delete all monitors // delete all monitors
for _, monitorID := range monitorIDs { for _, monitorID := range monitorIDs {
err := v2_monitors.Delete(lbaas.network, monitorID).ExtractErr() err := v2monitors.Delete(lbaas.network, monitorID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return err return err
} }
@@ -1187,7 +1187,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
for _, poolID := range poolIDs { for _, poolID := range poolIDs {
// delete all members for this pool // delete all members for this pool
for _, memberID := range memberIDs { for _, memberID := range memberIDs {
err := v2_pools.DeleteMember(lbaas.network, poolID, memberID).ExtractErr() err := v2pools.DeleteMember(lbaas.network, poolID, memberID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return err return err
} }
@@ -1195,7 +1195,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
} }
// delete pool // delete pool
err := v2_pools.Delete(lbaas.network, poolID).ExtractErr() err := v2pools.Delete(lbaas.network, poolID).ExtractErr()
if err != nil && !isNotFound(err) { if err != nil && !isNotFound(err) {
return err return err
} }

View File

@@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/v1/pod" apipod "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
@@ -211,8 +211,8 @@ func (f *fakePetClient) Update(expected, wanted *pcb) error {
pets := []*pcb{} pets := []*pcb{}
for i, pet := range f.pets { for i, pet := range f.pets {
if wanted.pod.Name == pet.pod.Name { if wanted.pod.Name == pet.pod.Name {
f.pets[i].pod.Annotations[api_pod.PodHostnameAnnotation] = wanted.pod.Annotations[api_pod.PodHostnameAnnotation] f.pets[i].pod.Annotations[apipod.PodHostnameAnnotation] = wanted.pod.Annotations[apipod.PodHostnameAnnotation]
f.pets[i].pod.Annotations[api_pod.PodSubdomainAnnotation] = wanted.pod.Annotations[api_pod.PodSubdomainAnnotation] f.pets[i].pod.Annotations[apipod.PodSubdomainAnnotation] = wanted.pod.Annotations[apipod.PodSubdomainAnnotation]
f.pets[i].pod.Spec = wanted.pod.Spec f.pets[i].pod.Spec = wanted.pod.Spec
found = true found = true
} }

View File

@@ -24,7 +24,7 @@ import (
"testing" "testing"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/v1/pod" apipod "k8s.io/kubernetes/pkg/api/v1/pod"
) )
func TestPetIDName(t *testing.T) { func TestPetIDName(t *testing.T) {
@@ -54,11 +54,11 @@ func TestPetIDDNS(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to generate pet %v", err) t.Fatalf("Failed to generate pet %v", err)
} }
if hostname, ok := pod.Annotations[api_pod.PodHostnameAnnotation]; !ok || hostname != petName { if hostname, ok := pod.Annotations[apipod.PodHostnameAnnotation]; !ok || hostname != petName {
t.Errorf("Wrong hostname: %v", hostname) t.Errorf("Wrong hostname: %v", hostname)
} }
// TODO: Check this against the governing service. // TODO: Check this against the governing service.
if subdomain, ok := pod.Annotations[api_pod.PodSubdomainAnnotation]; !ok || subdomain != petSubdomain { if subdomain, ok := pod.Annotations[apipod.PodSubdomainAnnotation]; !ok || subdomain != petSubdomain {
t.Errorf("Wrong subdomain: %v", subdomain) t.Errorf("Wrong subdomain: %v", subdomain)
} }
} }

View File

@@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake" fakeinternal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -281,7 +281,7 @@ func TestSyncStatefulSetBlockedPet(t *testing.T) {
} }
type fakeClient struct { type fakeClient struct {
fake_internal.Clientset fakeinternal.Clientset
statefulsetClient *fakeStatefulSetClient statefulsetClient *fakeStatefulSetClient
} }

View File

@@ -43,7 +43,7 @@ import (
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
heapster "k8s.io/heapster/metrics/api/v1/types" heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -298,15 +298,15 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
var heapsterRawMemResponse []byte var heapsterRawMemResponse []byte
if tc.useMetricsApi { if tc.useMetricsApi {
metrics := metrics_api.PodMetricsList{} metrics := metricsapi.PodMetricsList{}
for i, cpu := range tc.reportedLevels { for i, cpu := range tc.reportedLevels {
podMetric := metrics_api.PodMetrics{ podMetric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i), Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace, Namespace: namespace,
}, },
Timestamp: unversioned.Time{Time: time.Now()}, Timestamp: unversioned.Time{Time: time.Now()},
Containers: []metrics_api.ContainerMetrics{ Containers: []metricsapi.ContainerMetrics{
{ {
Name: "container", Name: "container",
Usage: v1.ResourceList{ Usage: v1.ResourceList{

View File

@@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
heapster "k8s.io/heapster/metrics/api/v1/types" heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
) )
// PodResourceInfo contains pod resourcemetric values as a map from pod names to // PodResourceInfo contains pod resourcemetric values as a map from pod names to
@@ -92,7 +92,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw)) glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))
metrics := metrics_api.PodMetricsList{} metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics) err = json.Unmarshal(resultRaw, &metrics)
if err != nil { if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err) return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)

View File

@@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
heapster "k8s.io/heapster/metrics/api/v1/types" heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -103,18 +103,18 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
if isResource { if isResource {
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) { fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
metrics := metrics_api.PodMetricsList{} metrics := metricsapi.PodMetricsList{}
for i, containers := range tc.reportedPodMetrics { for i, containers := range tc.reportedPodMetrics {
metric := metrics_api.PodMetrics{ metric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i), Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: namespace, Namespace: namespace,
}, },
Timestamp: unversioned.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)}, Timestamp: unversioned.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
Containers: []metrics_api.ContainerMetrics{}, Containers: []metricsapi.ContainerMetrics{},
} }
for j, cpu := range containers { for j, cpu := range containers {
cm := metrics_api.ContainerMetrics{ cm := metricsapi.ContainerMetrics{
Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j), Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
Usage: v1.ResourceList{ Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity( v1.ResourceCPU: *resource.NewMilliQuantity(

View File

@@ -36,7 +36,7 @@ import (
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
heapster "k8s.io/heapster/metrics/api/v1/types" heapster "k8s.io/heapster/metrics/api/v1/types"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@@ -131,15 +131,15 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
var heapsterRawMemResponse []byte var heapsterRawMemResponse []byte
if tc.resource != nil { if tc.resource != nil {
metrics := metrics_api.PodMetricsList{} metrics := metricsapi.PodMetricsList{}
for i, resValue := range tc.resource.levels { for i, resValue := range tc.resource.levels {
podMetric := metrics_api.PodMetrics{ podMetric := metricsapi.PodMetrics{
ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", podNamePrefix, i), Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
Namespace: testNamespace, Namespace: testNamespace,
}, },
Timestamp: unversioned.Time{Time: tc.timestamp}, Timestamp: unversioned.Time{Time: tc.timestamp},
Containers: []metrics_api.ContainerMetrics{ Containers: []metricsapi.ContainerMetrics{
{ {
Name: "container1", Name: "container1",
Usage: v1.ResourceList{ Usage: v1.ResourceList{

View File

@@ -29,12 +29,12 @@ import (
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
pkg_runtime "k8s.io/kubernetes/pkg/runtime" pkgruntime "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -99,7 +99,7 @@ type ServiceController struct {
// (like load balancers) in sync with the registry. // (like load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) (*ServiceController, error) { func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) (*ServiceController, error) {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "service-controller"}) recorder := broadcaster.NewRecorder(v1.EventSource{Component: "service-controller"})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
@@ -121,7 +121,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
} }
s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer( s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return s.kubeClient.Core().Services(v1.NamespaceAll).List(options) return s.kubeClient.Core().Services(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {

View File

@@ -27,7 +27,7 @@ import (
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1" unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
@@ -62,7 +62,7 @@ func NewController(p ControllerParameters) *PersistentVolumeController {
eventRecorder := p.EventRecorder eventRecorder := p.EventRecorder
if eventRecorder == nil { if eventRecorder == nil {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: p.KubeClient.Core().Events("")}) broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: p.KubeClient.Core().Events("")})
eventRecorder = broadcaster.NewRecorder(v1.EventSource{Component: "persistentvolume-controller"}) eventRecorder = broadcaster.NewRecorder(v1.EventSource{Component: "persistentvolume-controller"})
} }

View File

@@ -29,7 +29,7 @@ import (
etcd "github.com/coreos/etcd/client" etcd "github.com/coreos/etcd/client"
"github.com/miekg/dns" "github.com/miekg/dns"
skymsg "github.com/skynetservices/skydns/msg" skymsg "github.com/skynetservices/skydns/msg"
skyServer "github.com/skynetservices/skydns/server" skyserver "github.com/skynetservices/skydns/server"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
@@ -171,9 +171,9 @@ func assertSRVRecordsMatchPort(t *testing.T, records []dns.RR, port ...int) {
func TestSkySimpleSRVLookup(t *testing.T) { func TestSkySimpleSRVLookup(t *testing.T) {
kd := newKubeDNS() kd := newKubeDNS()
skydnsConfig := &skyServer.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"} skydnsConfig := &skyserver.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyServer.SetDefaults(skydnsConfig) skyserver.SetDefaults(skydnsConfig)
s := skyServer.New(kd, skydnsConfig) s := skyserver.New(kd, skydnsConfig)
service := newHeadlessService() service := newHeadlessService()
endpointIPs := []string{"10.0.0.1", "10.0.0.2"} endpointIPs := []string{"10.0.0.1", "10.0.0.2"}
@@ -201,9 +201,9 @@ func TestSkySimpleSRVLookup(t *testing.T) {
func TestSkyPodHostnameSRVLookup(t *testing.T) { func TestSkyPodHostnameSRVLookup(t *testing.T) {
kd := newKubeDNS() kd := newKubeDNS()
skydnsConfig := &skyServer.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"} skydnsConfig := &skyserver.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyServer.SetDefaults(skydnsConfig) skyserver.SetDefaults(skydnsConfig)
s := skyServer.New(kd, skydnsConfig) s := skyserver.New(kd, skydnsConfig)
service := newHeadlessService() service := newHeadlessService()
endpointIPs := []string{"10.0.0.1", "10.0.0.2"} endpointIPs := []string{"10.0.0.1", "10.0.0.2"}
@@ -240,9 +240,9 @@ func TestSkyPodHostnameSRVLookup(t *testing.T) {
func TestSkyNamedPortSRVLookup(t *testing.T) { func TestSkyNamedPortSRVLookup(t *testing.T) {
kd := newKubeDNS() kd := newKubeDNS()
skydnsConfig := &skyServer.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"} skydnsConfig := &skyserver.Config{Domain: testDomain, DnsAddr: "0.0.0.0:53"}
skyServer.SetDefaults(skydnsConfig) skyserver.SetDefaults(skydnsConfig)
s := skyServer.New(kd, skydnsConfig) s := skyserver.New(kd, skydnsConfig)
service := newHeadlessService() service := newHeadlessService()
eip := "10.0.0.1" eip := "10.0.0.1"

View File

@@ -25,7 +25,7 @@ import (
"net/url" "net/url"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/client/restclient/fake" "k8s.io/kubernetes/pkg/client/restclient/fake"
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
) )
@@ -85,14 +85,14 @@ func TestTopPodAllInNamespaceMetrics(t *testing.T) {
metrics := testPodMetricsData() metrics := testPodMetricsData()
testNamespace := "testnamespace" testNamespace := "testnamespace"
nonTestNamespace := "anothernamespace" nonTestNamespace := "anothernamespace"
expectedMetrics := metrics_api.PodMetricsList{ expectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta, ListMeta: metrics.ListMeta,
Items: metrics.Items[0:2], Items: metrics.Items[0:2],
} }
for _, m := range expectedMetrics.Items { for _, m := range expectedMetrics.Items {
m.Namespace = testNamespace m.Namespace = testNamespace
} }
nonExpectedMetrics := metrics_api.PodMetricsList{ nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta, ListMeta: metrics.ListMeta,
Items: metrics.Items[2:], Items: metrics.Items[2:],
} }
@@ -144,7 +144,7 @@ func TestTopPodWithNameMetrics(t *testing.T) {
initTestErrorHandler(t) initTestErrorHandler(t)
metrics := testPodMetricsData() metrics := testPodMetricsData()
expectedMetrics := metrics.Items[0] expectedMetrics := metrics.Items[0]
nonExpectedMetrics := metrics_api.PodMetricsList{ nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta, ListMeta: metrics.ListMeta,
Items: metrics.Items[1:], Items: metrics.Items[1:],
} }
@@ -192,11 +192,11 @@ func TestTopPodWithNameMetrics(t *testing.T) {
func TestTopPodWithLabelSelectorMetrics(t *testing.T) { func TestTopPodWithLabelSelectorMetrics(t *testing.T) {
initTestErrorHandler(t) initTestErrorHandler(t)
metrics := testPodMetricsData() metrics := testPodMetricsData()
expectedMetrics := metrics_api.PodMetricsList{ expectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta, ListMeta: metrics.ListMeta,
Items: metrics.Items[0:2], Items: metrics.Items[0:2],
} }
nonExpectedMetrics := metrics_api.PodMetricsList{ nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta, ListMeta: metrics.ListMeta,
Items: metrics.Items[2:], Items: metrics.Items[2:],
} }
@@ -249,7 +249,7 @@ func TestTopPodWithContainersMetrics(t *testing.T) {
initTestErrorHandler(t) initTestErrorHandler(t)
metrics := testPodMetricsData() metrics := testPodMetricsData()
expectedMetrics := metrics.Items[0] expectedMetrics := metrics.Items[0]
nonExpectedMetrics := metrics_api.PodMetricsList{ nonExpectedMetrics := metricsapi.PodMetricsList{
ListMeta: metrics.ListMeta, ListMeta: metrics.ListMeta,
Items: metrics.Items[1:], Items: metrics.Items[1:],
} }

View File

@@ -25,7 +25,7 @@ import (
"testing" "testing"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
@@ -59,12 +59,12 @@ func marshallBody(metrics interface{}) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(result)), nil return ioutil.NopCloser(bytes.NewReader(result)), nil
} }
func testNodeMetricsData() (*metrics_api.NodeMetricsList, *api.NodeList) { func testNodeMetricsData() (*metricsapi.NodeMetricsList, *api.NodeList) {
metrics := &metrics_api.NodeMetricsList{ metrics := &metricsapi.NodeMetricsList{
ListMeta: unversioned.ListMeta{ ListMeta: unversioned.ListMeta{
ResourceVersion: "1", ResourceVersion: "1",
}, },
Items: []metrics_api.NodeMetrics{ Items: []metricsapi.NodeMetrics{
{ {
ObjectMeta: v1.ObjectMeta{Name: "node1", ResourceVersion: "10"}, ObjectMeta: v1.ObjectMeta{Name: "node1", ResourceVersion: "10"},
Window: unversioned.Duration{Duration: time.Minute}, Window: unversioned.Duration{Duration: time.Minute},
@@ -115,16 +115,16 @@ func testNodeMetricsData() (*metrics_api.NodeMetricsList, *api.NodeList) {
return metrics, nodes return metrics, nodes
} }
func testPodMetricsData() *metrics_api.PodMetricsList { func testPodMetricsData() *metricsapi.PodMetricsList {
return &metrics_api.PodMetricsList{ return &metricsapi.PodMetricsList{
ListMeta: unversioned.ListMeta{ ListMeta: unversioned.ListMeta{
ResourceVersion: "2", ResourceVersion: "2",
}, },
Items: []metrics_api.PodMetrics{ Items: []metricsapi.PodMetrics{
{ {
ObjectMeta: v1.ObjectMeta{Name: "pod1", Namespace: "test", ResourceVersion: "10"}, ObjectMeta: v1.ObjectMeta{Name: "pod1", Namespace: "test", ResourceVersion: "10"},
Window: unversioned.Duration{Duration: time.Minute}, Window: unversioned.Duration{Duration: time.Minute},
Containers: []metrics_api.ContainerMetrics{ Containers: []metricsapi.ContainerMetrics{
{ {
Name: "container1-1", Name: "container1-1",
Usage: v1.ResourceList{ Usage: v1.ResourceList{
@@ -146,7 +146,7 @@ func testPodMetricsData() *metrics_api.PodMetricsList {
{ {
ObjectMeta: v1.ObjectMeta{Name: "pod2", Namespace: "test", ResourceVersion: "11"}, ObjectMeta: v1.ObjectMeta{Name: "pod2", Namespace: "test", ResourceVersion: "11"},
Window: unversioned.Duration{Duration: time.Minute}, Window: unversioned.Duration{Duration: time.Minute},
Containers: []metrics_api.ContainerMetrics{ Containers: []metricsapi.ContainerMetrics{
{ {
Name: "container2-1", Name: "container2-1",
Usage: v1.ResourceList{ Usage: v1.ResourceList{
@@ -176,7 +176,7 @@ func testPodMetricsData() *metrics_api.PodMetricsList {
{ {
ObjectMeta: v1.ObjectMeta{Name: "pod3", Namespace: "test", ResourceVersion: "12"}, ObjectMeta: v1.ObjectMeta{Name: "pod3", Namespace: "test", ResourceVersion: "12"},
Window: unversioned.Duration{Duration: time.Minute}, Window: unversioned.Duration{Duration: time.Minute},
Containers: []metrics_api.ContainerMetrics{ Containers: []metricsapi.ContainerMetrics{
{ {
Name: "container3-1", Name: "container3-1",
Usage: v1.ResourceList{ Usage: v1.ResourceList{

View File

@@ -19,7 +19,7 @@ package util
import ( import (
"sync" "sync"
fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
@@ -33,7 +33,7 @@ func NewClientCache(loader clientcmd.ClientConfig) *ClientCache {
return &ClientCache{ return &ClientCache{
clientsets: make(map[schema.GroupVersion]*internalclientset.Clientset), clientsets: make(map[schema.GroupVersion]*internalclientset.Clientset),
configs: make(map[schema.GroupVersion]*restclient.Config), configs: make(map[schema.GroupVersion]*restclient.Config),
fedClientSets: make(map[schema.GroupVersion]fed_clientset.Interface), fedClientSets: make(map[schema.GroupVersion]fedclientset.Interface),
loader: loader, loader: loader,
} }
} }
@@ -43,7 +43,7 @@ func NewClientCache(loader clientcmd.ClientConfig) *ClientCache {
type ClientCache struct { type ClientCache struct {
loader clientcmd.ClientConfig loader clientcmd.ClientConfig
clientsets map[schema.GroupVersion]*internalclientset.Clientset clientsets map[schema.GroupVersion]*internalclientset.Clientset
fedClientSets map[schema.GroupVersion]fed_clientset.Interface fedClientSets map[schema.GroupVersion]fedclientset.Interface
configs map[schema.GroupVersion]*restclient.Config configs map[schema.GroupVersion]*restclient.Config
matchVersion bool matchVersion bool
@@ -158,7 +158,7 @@ func (c *ClientCache) ClientSetForVersion(requiredVersion *schema.GroupVersion)
return clientset, nil return clientset, nil
} }
func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion) (fed_clientset.Interface, error) { func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion) (fedclientset.Interface, error) {
if version != nil { if version != nil {
if clientSet, found := c.fedClientSets[*version]; found { if clientSet, found := c.fedClientSets[*version]; found {
return clientSet, nil return clientSet, nil
@@ -170,7 +170,7 @@ func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion
} }
// TODO: support multi versions of client with clientset // TODO: support multi versions of client with clientset
clientSet, err := fed_clientset.NewForConfig(config) clientSet, err := fedclientset.NewForConfig(config)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -178,7 +178,7 @@ func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion
if version != nil { if version != nil {
configCopy := *config configCopy := *config
clientSet, err := fed_clientset.NewForConfig(&configCopy) clientSet, err := fedclientset.NewForConfig(&configCopy)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -29,7 +29,7 @@ import (
"time" "time"
"k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/federation/apis/federation"
fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/events" "k8s.io/kubernetes/pkg/api/events"
@@ -2320,7 +2320,7 @@ func describeConfigMap(configMap *api.ConfigMap) (string, error) {
} }
type ClusterDescriber struct { type ClusterDescriber struct {
fed_clientset.Interface fedclientset.Interface
} }
func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {

View File

@@ -26,7 +26,7 @@ import (
"time" "time"
"k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/federation/apis/federation"
fed_fake "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake" fedfake "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/fake"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
@@ -665,7 +665,7 @@ func TestDescribeCluster(t *testing.T) {
}, },
}, },
} }
fake := fed_fake.NewSimpleClientset(&cluster) fake := fedfake.NewSimpleClientset(&cluster)
d := ClusterDescriber{Interface: fake} d := ClusterDescriber{Interface: fake}
out, err := d.Describe("any", "foo", DescriberSettings{ShowEvents: true}) out, err := d.Describe("any", "foo", DescriberSettings{ShowEvents: true})
if err != nil { if err != nil {

View File

@@ -21,7 +21,7 @@ import (
"errors" "errors"
"fmt" "fmt"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/api/validation"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
@@ -97,63 +97,63 @@ func nodeMetricsUrl(name string) (string, error) {
return fmt.Sprintf("%s/nodes/%s", metricsRoot, name), nil return fmt.Sprintf("%s/nodes/%s", metricsRoot, name), nil
} }
func (cli *HeapsterMetricsClient) GetNodeMetrics(nodeName string, selector labels.Selector) ([]metrics_api.NodeMetrics, error) { func (cli *HeapsterMetricsClient) GetNodeMetrics(nodeName string, selector labels.Selector) ([]metricsapi.NodeMetrics, error) {
params := map[string]string{"labelSelector": selector.String()} params := map[string]string{"labelSelector": selector.String()}
path, err := nodeMetricsUrl(nodeName) path, err := nodeMetricsUrl(nodeName)
if err != nil { if err != nil {
return []metrics_api.NodeMetrics{}, err return []metricsapi.NodeMetrics{}, err
} }
resultRaw, err := GetHeapsterMetrics(cli, path, params) resultRaw, err := GetHeapsterMetrics(cli, path, params)
if err != nil { if err != nil {
return []metrics_api.NodeMetrics{}, err return []metricsapi.NodeMetrics{}, err
} }
metrics := make([]metrics_api.NodeMetrics, 0) metrics := make([]metricsapi.NodeMetrics, 0)
if len(nodeName) == 0 { if len(nodeName) == 0 {
metricsList := metrics_api.NodeMetricsList{} metricsList := metricsapi.NodeMetricsList{}
err = json.Unmarshal(resultRaw, &metricsList) err = json.Unmarshal(resultRaw, &metricsList)
if err != nil { if err != nil {
return []metrics_api.NodeMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err) return []metricsapi.NodeMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err)
} }
metrics = append(metrics, metricsList.Items...) metrics = append(metrics, metricsList.Items...)
} else { } else {
var singleMetric metrics_api.NodeMetrics var singleMetric metricsapi.NodeMetrics
err = json.Unmarshal(resultRaw, &singleMetric) err = json.Unmarshal(resultRaw, &singleMetric)
if err != nil { if err != nil {
return []metrics_api.NodeMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err) return []metricsapi.NodeMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err)
} }
metrics = append(metrics, singleMetric) metrics = append(metrics, singleMetric)
} }
return metrics, nil return metrics, nil
} }
func (cli *HeapsterMetricsClient) GetPodMetrics(namespace string, podName string, allNamespaces bool, selector labels.Selector) ([]metrics_api.PodMetrics, error) { func (cli *HeapsterMetricsClient) GetPodMetrics(namespace string, podName string, allNamespaces bool, selector labels.Selector) ([]metricsapi.PodMetrics, error) {
if allNamespaces { if allNamespaces {
namespace = api.NamespaceAll namespace = api.NamespaceAll
} }
path, err := podMetricsUrl(namespace, podName) path, err := podMetricsUrl(namespace, podName)
if err != nil { if err != nil {
return []metrics_api.PodMetrics{}, err return []metricsapi.PodMetrics{}, err
} }
params := map[string]string{"labelSelector": selector.String()} params := map[string]string{"labelSelector": selector.String()}
allMetrics := make([]metrics_api.PodMetrics, 0) allMetrics := make([]metricsapi.PodMetrics, 0)
resultRaw, err := GetHeapsterMetrics(cli, path, params) resultRaw, err := GetHeapsterMetrics(cli, path, params)
if err != nil { if err != nil {
return []metrics_api.PodMetrics{}, err return []metricsapi.PodMetrics{}, err
} }
if len(podName) == 0 { if len(podName) == 0 {
metrics := metrics_api.PodMetricsList{} metrics := metricsapi.PodMetricsList{}
err = json.Unmarshal(resultRaw, &metrics) err = json.Unmarshal(resultRaw, &metrics)
if err != nil { if err != nil {
return []metrics_api.PodMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err) return []metricsapi.PodMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err)
} }
allMetrics = append(allMetrics, metrics.Items...) allMetrics = append(allMetrics, metrics.Items...)
} else { } else {
var singleMetric metrics_api.PodMetrics var singleMetric metricsapi.PodMetrics
err = json.Unmarshal(resultRaw, &singleMetric) err = json.Unmarshal(resultRaw, &singleMetric)
if err != nil { if err != nil {
return []metrics_api.PodMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err) return []metricsapi.PodMetrics{}, fmt.Errorf("failed to unmarshall heapster response: %v", err)
} }
allMetrics = append(allMetrics, singleMetric) allMetrics = append(allMetrics, singleMetric)
} }

View File

@@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"io" "io"
metrics_api "k8s.io/heapster/metrics/apis/metrics/v1alpha1" metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl"
@@ -51,7 +51,7 @@ func NewTopCmdPrinter(out io.Writer) *TopCmdPrinter {
return &TopCmdPrinter{out: out} return &TopCmdPrinter{out: out}
} }
func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metrics_api.NodeMetrics, availableResources map[string]api.ResourceList) error { func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, availableResources map[string]api.ResourceList) error {
if len(metrics) == 0 { if len(metrics) == 0 {
return nil return nil
} }
@@ -74,7 +74,7 @@ func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metrics_api.NodeMetrics
return nil return nil
} }
func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metrics_api.PodMetrics, printContainers bool, withNamespace bool) error { func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, printContainers bool, withNamespace bool) error {
if len(metrics) == 0 { if len(metrics) == 0 {
return nil return nil
} }
@@ -104,7 +104,7 @@ func printColumnNames(out io.Writer, names []string) {
fmt.Fprint(out, "\n") fmt.Fprint(out, "\n")
} }
func printSinglePodMetrics(out io.Writer, m *metrics_api.PodMetrics, printContainersOnly bool, withNamespace bool) error { func printSinglePodMetrics(out io.Writer, m *metricsapi.PodMetrics, printContainersOnly bool, withNamespace bool) error {
containers := make(map[string]api.ResourceList) containers := make(map[string]api.ResourceList)
podMetrics := make(api.ResourceList) podMetrics := make(api.ResourceList)
for _, res := range MeasuredResources { for _, res := range MeasuredResources {

View File

@@ -19,20 +19,20 @@ package api
import ( import (
"time" "time"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// RuntimeVersioner contains methods for runtime name, version and API version. // RuntimeVersioner contains methods for runtime name, version and API version.
type RuntimeVersioner interface { type RuntimeVersioner interface {
// Version returns the runtime name, runtime version and runtime API version // Version returns the runtime name, runtime version and runtime API version
Version(apiVersion string) (*runtimeApi.VersionResponse, error) Version(apiVersion string) (*runtimeapi.VersionResponse, error)
} }
// ContainerManager contains methods to manipulate containers managed by a // ContainerManager contains methods to manipulate containers managed by a
// container runtime. The methods are thread-safe. // container runtime. The methods are thread-safe.
type ContainerManager interface { type ContainerManager interface {
// CreateContainer creates a new container in specified PodSandbox. // CreateContainer creates a new container in specified PodSandbox.
CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
// StartContainer starts the container. // StartContainer starts the container.
StartContainer(containerID string) error StartContainer(containerID string) error
// StopContainer stops a running container with a grace period (i.e., timeout). // StopContainer stops a running container with a grace period (i.e., timeout).
@@ -40,16 +40,16 @@ type ContainerManager interface {
// RemoveContainer removes the container. // RemoveContainer removes the container.
RemoveContainer(containerID string) error RemoveContainer(containerID string) error
// ListContainers lists all containers by filters. // ListContainers lists all containers by filters.
ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error)
// ContainerStatus returns the status of the container. // ContainerStatus returns the status of the container.
ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error)
// ExecSync executes a command in the container, and returns the stdout output. // ExecSync executes a command in the container, and returns the stdout output.
// If command exits with a non-zero exit code, an error is returned. // If command exits with a non-zero exit code, an error is returned.
ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error)
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address. // Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
Exec(*runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error)
// Attach prepares a streaming endpoint to attach to a running container, and returns the address. // Attach prepares a streaming endpoint to attach to a running container, and returns the address.
Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error)
} }
// PodSandboxManager contains methods for operating on PodSandboxes. The methods // PodSandboxManager contains methods for operating on PodSandboxes. The methods
@@ -57,7 +57,7 @@ type ContainerManager interface {
type PodSandboxManager interface { type PodSandboxManager interface {
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state. // the sandbox is in ready state.
RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error)
// StopPodSandbox stops the sandbox. If there are any running containers in the // StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated. // sandbox, they should be force terminated.
StopPodSandbox(podSandboxID string) error StopPodSandbox(podSandboxID string) error
@@ -65,11 +65,11 @@ type PodSandboxManager interface {
// sandbox, they should be forcibly removed. // sandbox, they should be forcibly removed.
RemovePodSandbox(podSandboxID string) error RemovePodSandbox(podSandboxID string) error
// PodSandboxStatus returns the Status of the PodSandbox. // PodSandboxStatus returns the Status of the PodSandbox.
PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error)
// ListPodSandbox returns a list of Sandbox. // ListPodSandbox returns a list of Sandbox.
ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error)
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
PortForward(*runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error)
} }
// RuntimeService interface should be implemented by a container runtime. // RuntimeService interface should be implemented by a container runtime.
@@ -80,9 +80,9 @@ type RuntimeService interface {
PodSandboxManager PodSandboxManager
// UpdateRuntimeConfig updates runtime configuration if specified // UpdateRuntimeConfig updates runtime configuration if specified
UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) error UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error
// Status returns the status of the runtime. // Status returns the status of the runtime.
Status() (*runtimeApi.RuntimeStatus, error) Status() (*runtimeapi.RuntimeStatus, error)
} }
// ImageManagerService interface should be implemented by a container image // ImageManagerService interface should be implemented by a container image
@@ -90,11 +90,11 @@ type RuntimeService interface {
// The methods should be thread-safe. // The methods should be thread-safe.
type ImageManagerService interface { type ImageManagerService interface {
// ListImages lists the existing images. // ListImages lists the existing images.
ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error)
// ImageStatus returns the status of the image. // ImageStatus returns the status of the image.
ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error)
// PullImage pulls an image with the authentication config. // PullImage pulls an image with the authentication config.
PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error
// RemoveImage removes the image. // RemoveImage removes the image.
RemoveImage(image *runtimeApi.ImageSpec) error RemoveImage(image *runtimeapi.ImageSpec) error
} }

View File

@@ -19,7 +19,7 @@ package testing
import ( import (
"sync" "sync"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
) )
@@ -28,14 +28,14 @@ type FakeImageService struct {
FakeImageSize uint64 FakeImageSize uint64
Called []string Called []string
Images map[string]*runtimeApi.Image Images map[string]*runtimeapi.Image
} }
func (r *FakeImageService) SetFakeImages(images []string) { func (r *FakeImageService) SetFakeImages(images []string) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Images = make(map[string]*runtimeApi.Image) r.Images = make(map[string]*runtimeapi.Image)
for _, image := range images { for _, image := range images {
r.Images[image] = r.makeFakeImage(image) r.Images[image] = r.makeFakeImage(image)
} }
@@ -51,25 +51,25 @@ func (r *FakeImageService) SetFakeImageSize(size uint64) {
func NewFakeImageService() *FakeImageService { func NewFakeImageService() *FakeImageService {
return &FakeImageService{ return &FakeImageService{
Called: make([]string, 0), Called: make([]string, 0),
Images: make(map[string]*runtimeApi.Image), Images: make(map[string]*runtimeapi.Image),
} }
} }
func (r *FakeImageService) makeFakeImage(image string) *runtimeApi.Image { func (r *FakeImageService) makeFakeImage(image string) *runtimeapi.Image {
return &runtimeApi.Image{ return &runtimeapi.Image{
Id: &image, Id: &image,
Size_: &r.FakeImageSize, Size_: &r.FakeImageSize,
RepoTags: []string{image}, RepoTags: []string{image},
} }
} }
func (r *FakeImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) { func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Called = append(r.Called, "ListImages") r.Called = append(r.Called, "ListImages")
images := make([]*runtimeApi.Image, 0) images := make([]*runtimeapi.Image, 0)
for _, img := range r.Images { for _, img := range r.Images {
if filter != nil && filter.Image != nil { if filter != nil && filter.Image != nil {
if !sliceutils.StringInSlice(filter.Image.GetImage(), img.RepoTags) { if !sliceutils.StringInSlice(filter.Image.GetImage(), img.RepoTags) {
@@ -82,7 +82,7 @@ func (r *FakeImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtim
return images, nil return images, nil
} }
func (r *FakeImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) { func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
@@ -91,7 +91,7 @@ func (r *FakeImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi
return r.Images[image.GetImage()], nil return r.Images[image.GetImage()], nil
} }
func (r *FakeImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error { func (r *FakeImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
@@ -107,7 +107,7 @@ func (r *FakeImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeA
return nil return nil
} }
func (r *FakeImageService) RemoveImage(image *runtimeApi.ImageSpec) error { func (r *FakeImageService) RemoveImage(image *runtimeapi.ImageSpec) error {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()

View File

@@ -22,7 +22,7 @@ import (
"sync" "sync"
"time" "time"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
var ( var (
@@ -34,12 +34,12 @@ var (
type FakePodSandbox struct { type FakePodSandbox struct {
// PodSandboxStatus contains the runtime information for a sandbox. // PodSandboxStatus contains the runtime information for a sandbox.
runtimeApi.PodSandboxStatus runtimeapi.PodSandboxStatus
} }
type FakeContainer struct { type FakeContainer struct {
// ContainerStatus contains the runtime information for a container. // ContainerStatus contains the runtime information for a container.
runtimeApi.ContainerStatus runtimeapi.ContainerStatus
// the sandbox id of this container // the sandbox id of this container
SandboxID string SandboxID string
@@ -50,7 +50,7 @@ type FakeRuntimeService struct {
Called []string Called []string
FakeStatus *runtimeApi.RuntimeStatus FakeStatus *runtimeapi.RuntimeStatus
Containers map[string]*FakeContainer Containers map[string]*FakeContainer
Sandboxes map[string]*FakePodSandbox Sandboxes map[string]*FakePodSandbox
} }
@@ -96,13 +96,13 @@ func NewFakeRuntimeService() *FakeRuntimeService {
} }
} }
func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResponse, error) { func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Called = append(r.Called, "Version") r.Called = append(r.Called, "Version")
return &runtimeApi.VersionResponse{ return &runtimeapi.VersionResponse{
Version: &version, Version: &version,
RuntimeName: &FakeRuntimeName, RuntimeName: &FakeRuntimeName,
RuntimeVersion: &version, RuntimeVersion: &version,
@@ -110,7 +110,7 @@ func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResp
}, nil }, nil
} }
func (r *FakeRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) { func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
@@ -119,7 +119,7 @@ func (r *FakeRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) {
return r.FakeStatus, nil return r.FakeStatus, nil
} }
func (r *FakeRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) { func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
@@ -129,14 +129,14 @@ func (r *FakeRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig)
// fixed name from BuildSandboxName() for easily making fake sandboxes. // fixed name from BuildSandboxName() for easily making fake sandboxes.
podSandboxID := BuildSandboxName(config.Metadata) podSandboxID := BuildSandboxName(config.Metadata)
createdAt := time.Now().Unix() createdAt := time.Now().Unix()
readyState := runtimeApi.PodSandboxState_SANDBOX_READY readyState := runtimeapi.PodSandboxState_SANDBOX_READY
r.Sandboxes[podSandboxID] = &FakePodSandbox{ r.Sandboxes[podSandboxID] = &FakePodSandbox{
PodSandboxStatus: runtimeApi.PodSandboxStatus{ PodSandboxStatus: runtimeapi.PodSandboxStatus{
Id: &podSandboxID, Id: &podSandboxID,
Metadata: config.Metadata, Metadata: config.Metadata,
State: &readyState, State: &readyState,
CreatedAt: &createdAt, CreatedAt: &createdAt,
Network: &runtimeApi.PodSandboxNetworkStatus{ Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: &FakePodSandboxIP, Ip: &FakePodSandboxIP,
}, },
Labels: config.Labels, Labels: config.Labels,
@@ -153,7 +153,7 @@ func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error {
r.Called = append(r.Called, "StopPodSandbox") r.Called = append(r.Called, "StopPodSandbox")
notReadyState := runtimeApi.PodSandboxState_SANDBOX_NOTREADY notReadyState := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
if s, ok := r.Sandboxes[podSandboxID]; ok { if s, ok := r.Sandboxes[podSandboxID]; ok {
s.State = &notReadyState s.State = &notReadyState
} else { } else {
@@ -175,7 +175,7 @@ func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error {
return nil return nil
} }
func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error) { func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
@@ -190,13 +190,13 @@ func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeApi.
return &status, nil return &status, nil
} }
func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) { func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Called = append(r.Called, "ListPodSandbox") r.Called = append(r.Called, "ListPodSandbox")
result := make([]*runtimeApi.PodSandbox, 0) result := make([]*runtimeapi.PodSandbox, 0)
for id, s := range r.Sandboxes { for id, s := range r.Sandboxes {
if filter != nil { if filter != nil {
if filter.Id != nil && filter.GetId() != id { if filter.Id != nil && filter.GetId() != id {
@@ -210,7 +210,7 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter)
} }
} }
result = append(result, &runtimeApi.PodSandbox{ result = append(result, &runtimeapi.PodSandbox{
Id: s.Id, Id: s.Id,
Metadata: s.Metadata, Metadata: s.Metadata,
State: s.State, State: s.State,
@@ -223,15 +223,15 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter)
return result, nil return result, nil
} }
func (r *FakeRuntimeService) PortForward(*runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) { func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Called = append(r.Called, "PortForward") r.Called = append(r.Called, "PortForward")
return &runtimeApi.PortForwardResponse{}, nil return &runtimeapi.PortForwardResponse{}, nil
} }
func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) { func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
@@ -241,10 +241,10 @@ func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtim
// fixed BuildContainerName() for easily making fake containers. // fixed BuildContainerName() for easily making fake containers.
containerID := BuildContainerName(config.Metadata, podSandboxID) containerID := BuildContainerName(config.Metadata, podSandboxID)
createdAt := time.Now().Unix() createdAt := time.Now().Unix()
createdState := runtimeApi.ContainerState_CONTAINER_CREATED createdState := runtimeapi.ContainerState_CONTAINER_CREATED
imageRef := config.Image.GetImage() imageRef := config.Image.GetImage()
r.Containers[containerID] = &FakeContainer{ r.Containers[containerID] = &FakeContainer{
ContainerStatus: runtimeApi.ContainerStatus{ ContainerStatus: runtimeapi.ContainerStatus{
Id: &containerID, Id: &containerID,
Metadata: config.Metadata, Metadata: config.Metadata,
Image: config.Image, Image: config.Image,
@@ -273,7 +273,7 @@ func (r *FakeRuntimeService) StartContainer(containerID string) error {
// Set container to running. // Set container to running.
startedAt := time.Now().Unix() startedAt := time.Now().Unix()
runningState := runtimeApi.ContainerState_CONTAINER_RUNNING runningState := runtimeapi.ContainerState_CONTAINER_RUNNING
c.State = &runningState c.State = &runningState
c.StartedAt = &startedAt c.StartedAt = &startedAt
@@ -293,7 +293,7 @@ func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) er
// Set container to exited state. // Set container to exited state.
finishedAt := time.Now().Unix() finishedAt := time.Now().Unix()
exitedState := runtimeApi.ContainerState_CONTAINER_EXITED exitedState := runtimeapi.ContainerState_CONTAINER_EXITED
c.State = &exitedState c.State = &exitedState
c.FinishedAt = &finishedAt c.FinishedAt = &finishedAt
@@ -312,13 +312,13 @@ func (r *FakeRuntimeService) RemoveContainer(containerID string) error {
return nil return nil
} }
func (r *FakeRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) { func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Called = append(r.Called, "ListContainers") r.Called = append(r.Called, "ListContainers")
result := make([]*runtimeApi.Container, 0) result := make([]*runtimeapi.Container, 0)
for _, s := range r.Containers { for _, s := range r.Containers {
if filter != nil { if filter != nil {
if filter.Id != nil && filter.GetId() != s.GetId() { if filter.Id != nil && filter.GetId() != s.GetId() {
@@ -335,7 +335,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter)
} }
} }
result = append(result, &runtimeApi.Container{ result = append(result, &runtimeapi.Container{
Id: s.Id, Id: s.Id,
CreatedAt: s.CreatedAt, CreatedAt: s.CreatedAt,
PodSandboxId: &s.SandboxID, PodSandboxId: &s.SandboxID,
@@ -351,7 +351,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter)
return result, nil return result, nil
} }
func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) { func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
@@ -374,22 +374,22 @@ func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout
return nil, nil, nil return nil, nil, nil
} }
func (r *FakeRuntimeService) Exec(*runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) { func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Called = append(r.Called, "Exec") r.Called = append(r.Called, "Exec")
return &runtimeApi.ExecResponse{}, nil return &runtimeapi.ExecResponse{}, nil
} }
func (r *FakeRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) { func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.Called = append(r.Called, "Attach") r.Called = append(r.Called, "Attach")
return &runtimeApi.AttachResponse{}, nil return &runtimeapi.AttachResponse{}, nil
} }
func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeCOnfig *runtimeApi.RuntimeConfig) error { func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeCOnfig *runtimeapi.RuntimeConfig) error {
return nil return nil
} }

View File

@@ -19,15 +19,15 @@ package testing
import ( import (
"fmt" "fmt"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
func BuildContainerName(metadata *runtimeApi.ContainerMetadata, sandboxID string) string { func BuildContainerName(metadata *runtimeapi.ContainerMetadata, sandboxID string) string {
// include the sandbox ID to make the container ID unique. // include the sandbox ID to make the container ID unique.
return fmt.Sprintf("%s_%s_%d", sandboxID, metadata.GetName(), metadata.GetAttempt()) return fmt.Sprintf("%s_%s_%d", sandboxID, metadata.GetName(), metadata.GetAttempt())
} }
func BuildSandboxName(metadata *runtimeApi.PodSandboxMetadata) string { func BuildSandboxName(metadata *runtimeapi.PodSandboxMetadata) string {
return fmt.Sprintf("%s_%s_%s_%d", metadata.GetName(), metadata.GetNamespace(), metadata.GetUid(), metadata.GetAttempt()) return fmt.Sprintf("%s_%s_%s_%d", metadata.GetName(), metadata.GetNamespace(), metadata.GetUid(), metadata.GetAttempt())
} }

View File

@@ -26,7 +26,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"github.com/google/cadvisor/cache/memory" "github.com/google/cadvisor/cache/memory"
cadvisorMetrics "github.com/google/cadvisor/container" cadvisormetrics "github.com/google/cadvisor/container"
"github.com/google/cadvisor/events" "github.com/google/cadvisor/events"
cadvisorfs "github.com/google/cadvisor/fs" cadvisorfs "github.com/google/cadvisor/fs"
cadvisorhttp "github.com/google/cadvisor/http" cadvisorhttp "github.com/google/cadvisor/http"
@@ -101,7 +101,7 @@ func New(port uint, runtime string, rootPath string) (Interface, error) {
} }
// Create and start the cAdvisor container manager. // Create and start the cAdvisor container manager.
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisorMetrics.MetricSet{cadvisorMetrics.NetworkTcpUsageMetrics: struct{}{}}, http.DefaultClient) m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisormetrics.MetricSet{cadvisormetrics.NetworkTcpUsageMetrics: struct{}{}}, http.DefaultClient)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -17,12 +17,12 @@ limitations under the License.
package cadvisor package cadvisor
import ( import (
cadvisorApi "github.com/google/cadvisor/info/v1" cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
) )
func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) v1.ResourceList { func CapacityFromMachineInfo(info *cadvisorapi.MachineInfo) v1.ResourceList {
c := v1.ResourceList{ c := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity( v1.ResourceCPU: *resource.NewMilliQuantity(
int64(info.NumCores*1000), int64(info.NumCores*1000),

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils" "k8s.io/kubernetes/pkg/kubelet/util/ioutils"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -209,11 +209,11 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod
// This is only needed because we need to return sandboxes as if they were // This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG. // kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete. // TODO: Remove this once it becomes obsolete.
func SandboxToContainerState(state runtimeApi.PodSandboxState) ContainerState { func SandboxToContainerState(state runtimeapi.PodSandboxState) ContainerState {
switch state { switch state {
case runtimeApi.PodSandboxState_SANDBOX_READY: case runtimeapi.PodSandboxState_SANDBOX_READY:
return ContainerStateRunning return ContainerStateRunning
case runtimeApi.PodSandboxState_SANDBOX_NOTREADY: case runtimeapi.PodSandboxState_SANDBOX_NOTREADY:
return ContainerStateExited return ContainerStateExited
} }
return ContainerStateUnknown return ContainerStateUnknown

View File

@@ -26,7 +26,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol" "k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/term" "k8s.io/kubernetes/pkg/util/term"
@@ -299,7 +299,7 @@ type PodStatus struct {
ContainerStatuses []*ContainerStatus ContainerStatuses []*ContainerStatus
// Status of the pod sandbox. // Status of the pod sandbox.
// Only for kuberuntime now, other runtime may keep it nil. // Only for kuberuntime now, other runtime may keep it nil.
SandboxStatuses []*runtimeApi.PodSandboxStatus SandboxStatuses []*runtimeapi.PodSandboxStatus
} }
// ContainerStatus represents the status of a container. // ContainerStatus represents the status of a container.

View File

@@ -23,7 +23,7 @@ import (
dockertypes "github.com/docker/engine-api/types" dockertypes "github.com/docker/engine-api/types"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// This file contains helper functions to convert docker API types to runtime // This file contains helper functions to convert docker API types to runtime
@@ -36,13 +36,13 @@ const (
statusExitedPrefix = "Exited" statusExitedPrefix = "Exited"
) )
func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeApi.Image, error) { func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeapi.Image, error) {
if image == nil { if image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image") return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
} }
size := uint64(image.VirtualSize) size := uint64(image.VirtualSize)
return &runtimeApi.Image{ return &runtimeapi.Image{
Id: &image.ID, Id: &image.ID,
RepoTags: image.RepoTags, RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests, RepoDigests: image.RepoDigests,
@@ -50,13 +50,13 @@ func imageToRuntimeAPIImage(image *dockertypes.Image) (*runtimeApi.Image, error)
}, nil }, nil
} }
func imageInspectToRuntimeAPIImage(image *dockertypes.ImageInspect) (*runtimeApi.Image, error) { func imageInspectToRuntimeAPIImage(image *dockertypes.ImageInspect) (*runtimeapi.Image, error) {
if image == nil { if image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image") return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
} }
size := uint64(image.VirtualSize) size := uint64(image.VirtualSize)
runtimeImage := &runtimeApi.Image{ runtimeImage := &runtimeapi.Image{
Id: &image.ID, Id: &image.ID,
RepoTags: image.RepoTags, RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests, RepoDigests: image.RepoDigests,
@@ -77,7 +77,7 @@ func toPullableImageID(id string, image *dockertypes.ImageInspect) string {
return imageID return imageID
} }
func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeApi.Container, error) { func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeapi.Container, error) {
state := toRuntimeAPIContainerState(c.Status) state := toRuntimeAPIContainerState(c.Status)
if len(c.Names) == 0 { if len(c.Names) == 0 {
return nil, fmt.Errorf("unexpected empty container name: %+v", c) return nil, fmt.Errorf("unexpected empty container name: %+v", c)
@@ -90,11 +90,11 @@ func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeApi.Container, err
sandboxID := c.Labels[sandboxIDLabelKey] sandboxID := c.Labels[sandboxIDLabelKey]
// The timestamp in dockertypes.Container is in seconds. // The timestamp in dockertypes.Container is in seconds.
createdAt := c.Created * int64(time.Second) createdAt := c.Created * int64(time.Second)
return &runtimeApi.Container{ return &runtimeapi.Container{
Id: &c.ID, Id: &c.ID,
PodSandboxId: &sandboxID, PodSandboxId: &sandboxID,
Metadata: metadata, Metadata: metadata,
Image: &runtimeApi.ImageSpec{Image: &c.Image}, Image: &runtimeapi.ImageSpec{Image: &c.Image},
ImageRef: &c.ImageID, ImageRef: &c.ImageID,
State: &state, State: &state,
CreatedAt: &createdAt, CreatedAt: &createdAt,
@@ -103,48 +103,48 @@ func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeApi.Container, err
}, nil }, nil
} }
func toDockerContainerStatus(state runtimeApi.ContainerState) string { func toDockerContainerStatus(state runtimeapi.ContainerState) string {
switch state { switch state {
case runtimeApi.ContainerState_CONTAINER_CREATED: case runtimeapi.ContainerState_CONTAINER_CREATED:
return "created" return "created"
case runtimeApi.ContainerState_CONTAINER_RUNNING: case runtimeapi.ContainerState_CONTAINER_RUNNING:
return "running" return "running"
case runtimeApi.ContainerState_CONTAINER_EXITED: case runtimeapi.ContainerState_CONTAINER_EXITED:
return "exited" return "exited"
case runtimeApi.ContainerState_CONTAINER_UNKNOWN: case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
fallthrough fallthrough
default: default:
return "unknown" return "unknown"
} }
} }
func toRuntimeAPIContainerState(state string) runtimeApi.ContainerState { func toRuntimeAPIContainerState(state string) runtimeapi.ContainerState {
// Parse the state string in dockertypes.Container. This could break when // Parse the state string in dockertypes.Container. This could break when
// we upgrade docker. // we upgrade docker.
switch { switch {
case strings.HasPrefix(state, statusRunningPrefix): case strings.HasPrefix(state, statusRunningPrefix):
return runtimeApi.ContainerState_CONTAINER_RUNNING return runtimeapi.ContainerState_CONTAINER_RUNNING
case strings.HasPrefix(state, statusExitedPrefix): case strings.HasPrefix(state, statusExitedPrefix):
return runtimeApi.ContainerState_CONTAINER_EXITED return runtimeapi.ContainerState_CONTAINER_EXITED
case strings.HasPrefix(state, statusCreatedPrefix): case strings.HasPrefix(state, statusCreatedPrefix):
return runtimeApi.ContainerState_CONTAINER_CREATED return runtimeapi.ContainerState_CONTAINER_CREATED
default: default:
return runtimeApi.ContainerState_CONTAINER_UNKNOWN return runtimeapi.ContainerState_CONTAINER_UNKNOWN
} }
} }
func toRuntimeAPISandboxState(state string) runtimeApi.PodSandboxState { func toRuntimeAPISandboxState(state string) runtimeapi.PodSandboxState {
// Parse the state string in dockertypes.Container. This could break when // Parse the state string in dockertypes.Container. This could break when
// we upgrade docker. // we upgrade docker.
switch { switch {
case strings.HasPrefix(state, statusRunningPrefix): case strings.HasPrefix(state, statusRunningPrefix):
return runtimeApi.PodSandboxState_SANDBOX_READY return runtimeapi.PodSandboxState_SANDBOX_READY
default: default:
return runtimeApi.PodSandboxState_SANDBOX_NOTREADY return runtimeapi.PodSandboxState_SANDBOX_NOTREADY
} }
} }
func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeApi.PodSandbox, error) { func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeapi.PodSandbox, error) {
state := toRuntimeAPISandboxState(c.Status) state := toRuntimeAPISandboxState(c.Status)
if len(c.Names) == 0 { if len(c.Names) == 0 {
return nil, fmt.Errorf("unexpected empty sandbox name: %+v", c) return nil, fmt.Errorf("unexpected empty sandbox name: %+v", c)
@@ -156,7 +156,7 @@ func toRuntimeAPISandbox(c *dockertypes.Container) (*runtimeApi.PodSandbox, erro
labels, annotations := extractLabels(c.Labels) labels, annotations := extractLabels(c.Labels)
// The timestamp in dockertypes.Container is in seconds. // The timestamp in dockertypes.Container is in seconds.
createdAt := c.Created * int64(time.Second) createdAt := c.Created * int64(time.Second)
return &runtimeApi.PodSandbox{ return &runtimeapi.PodSandbox{
Id: &c.ID, Id: &c.ID,
Metadata: metadata, Metadata: metadata,
State: &state, State: &state,

View File

@@ -22,18 +22,18 @@ import (
dockertypes "github.com/docker/engine-api/types" dockertypes "github.com/docker/engine-api/types"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
func TestConvertDockerStatusToRuntimeAPIState(t *testing.T) { func TestConvertDockerStatusToRuntimeAPIState(t *testing.T) {
testCases := []struct { testCases := []struct {
input string input string
expected runtimeApi.ContainerState expected runtimeapi.ContainerState
}{ }{
{input: "Up 5 hours", expected: runtimeApi.ContainerState_CONTAINER_RUNNING}, {input: "Up 5 hours", expected: runtimeapi.ContainerState_CONTAINER_RUNNING},
{input: "Exited (0) 2 hours ago", expected: runtimeApi.ContainerState_CONTAINER_EXITED}, {input: "Exited (0) 2 hours ago", expected: runtimeapi.ContainerState_CONTAINER_EXITED},
{input: "Created", expected: runtimeApi.ContainerState_CONTAINER_CREATED}, {input: "Created", expected: runtimeapi.ContainerState_CONTAINER_CREATED},
{input: "Random string", expected: runtimeApi.ContainerState_CONTAINER_UNKNOWN}, {input: "Random string", expected: runtimeapi.ContainerState_CONTAINER_UNKNOWN},
} }
for _, test := range testCases { for _, test := range testCases {

View File

@@ -28,12 +28,12 @@ import (
dockerstrslice "github.com/docker/engine-api/types/strslice" dockerstrslice "github.com/docker/engine-api/types/strslice"
"github.com/golang/glog" "github.com/golang/glog"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"
) )
// ListContainers lists all containers matching the filter. // ListContainers lists all containers matching the filter.
func (ds *dockerService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) { func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
opts := dockertypes.ContainerListOptions{All: true} opts := dockertypes.ContainerListOptions{All: true}
opts.Filter = dockerfilters.NewArgs() opts.Filter = dockerfilters.NewArgs()
@@ -63,7 +63,7 @@ func (ds *dockerService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*
return nil, err return nil, err
} }
// Convert docker to runtime api containers. // Convert docker to runtime api containers.
result := []*runtimeApi.Container{} result := []*runtimeapi.Container{}
for i := range containers { for i := range containers {
c := containers[i] c := containers[i]
@@ -82,7 +82,7 @@ func (ds *dockerService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*
// Docker cannot store the log to an arbitrary location (yet), so we create an // Docker cannot store the log to an arbitrary location (yet), so we create an
// symlink at LogPath, linking to the actual path of the log. // symlink at LogPath, linking to the actual path of the log.
// TODO: check if the default values returned by the runtime API are ok. // TODO: check if the default values returned by the runtime API are ok.
func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) { func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
if config == nil { if config == nil {
return "", fmt.Errorf("container config is nil") return "", fmt.Errorf("container config is nil")
} }
@@ -283,7 +283,7 @@ func getContainerTimestamps(r *dockertypes.ContainerJSON) (time.Time, time.Time,
} }
// ContainerStatus inspects the docker container and returns the status. // ContainerStatus inspects the docker container and returns the status.
func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) { func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
r, err := ds.client.InspectContainer(containerID) r, err := ds.client.InspectContainer(containerID)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -303,11 +303,11 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
imageID := toPullableImageID(r.Image, ir) imageID := toPullableImageID(r.Image, ir)
// Convert the mounts. // Convert the mounts.
mounts := []*runtimeApi.Mount{} mounts := []*runtimeapi.Mount{}
for i := range r.Mounts { for i := range r.Mounts {
m := r.Mounts[i] m := r.Mounts[i]
readonly := !m.RW readonly := !m.RW
mounts = append(mounts, &runtimeApi.Mount{ mounts = append(mounts, &runtimeapi.Mount{
HostPath: &m.Source, HostPath: &m.Source,
ContainerPath: &m.Destination, ContainerPath: &m.Destination,
Readonly: &readonly, Readonly: &readonly,
@@ -315,11 +315,11 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
}) })
} }
// Interpret container states. // Interpret container states.
var state runtimeApi.ContainerState var state runtimeapi.ContainerState
var reason, message string var reason, message string
if r.State.Running { if r.State.Running {
// Container is running. // Container is running.
state = runtimeApi.ContainerState_CONTAINER_RUNNING state = runtimeapi.ContainerState_CONTAINER_RUNNING
} else { } else {
// Container is *not* running. We need to get more details. // Container is *not* running. We need to get more details.
// * Case 1: container has run and exited with non-zero finishedAt // * Case 1: container has run and exited with non-zero finishedAt
@@ -328,7 +328,7 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
// time, but a non-zero exit code. // time, but a non-zero exit code.
// * Case 3: container has been created, but not started (yet). // * Case 3: container has been created, but not started (yet).
if !finishedAt.IsZero() { // Case 1 if !finishedAt.IsZero() { // Case 1
state = runtimeApi.ContainerState_CONTAINER_EXITED state = runtimeapi.ContainerState_CONTAINER_EXITED
switch { switch {
case r.State.OOMKilled: case r.State.OOMKilled:
// TODO: consider exposing OOMKilled via the runtimeAPI. // TODO: consider exposing OOMKilled via the runtimeAPI.
@@ -341,13 +341,13 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
reason = "Error" reason = "Error"
} }
} else if r.State.ExitCode != 0 { // Case 2 } else if r.State.ExitCode != 0 { // Case 2
state = runtimeApi.ContainerState_CONTAINER_EXITED state = runtimeapi.ContainerState_CONTAINER_EXITED
// Adjust finshedAt and startedAt time to createdAt time to avoid // Adjust finshedAt and startedAt time to createdAt time to avoid
// the confusion. // the confusion.
finishedAt, startedAt = createdAt, createdAt finishedAt, startedAt = createdAt, createdAt
reason = "ContainerCannotRun" reason = "ContainerCannotRun"
} else { // Case 3 } else { // Case 3
state = runtimeApi.ContainerState_CONTAINER_CREATED state = runtimeapi.ContainerState_CONTAINER_CREATED
} }
message = r.State.Error message = r.State.Error
} }
@@ -362,10 +362,10 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeApi.Contai
} }
labels, annotations := extractLabels(r.Config.Labels) labels, annotations := extractLabels(r.Config.Labels)
return &runtimeApi.ContainerStatus{ return &runtimeapi.ContainerStatus{
Id: &r.ID, Id: &r.ID,
Metadata: metadata, Metadata: metadata,
Image: &runtimeApi.ImageSpec{Image: &r.Config.Image}, Image: &runtimeapi.ImageSpec{Image: &r.Config.Image},
ImageRef: &imageID, ImageRef: &imageID,
Mounts: mounts, Mounts: mounts,
ExitCode: &exitCode, ExitCode: &exitCode,

View File

@@ -24,18 +24,18 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
) )
// A helper to create a basic config. // A helper to create a basic config.
func makeContainerConfig(sConfig *runtimeApi.PodSandboxConfig, name, image string, attempt uint32, labels, annotations map[string]string) *runtimeApi.ContainerConfig { func makeContainerConfig(sConfig *runtimeapi.PodSandboxConfig, name, image string, attempt uint32, labels, annotations map[string]string) *runtimeapi.ContainerConfig {
return &runtimeApi.ContainerConfig{ return &runtimeapi.ContainerConfig{
Metadata: &runtimeApi.ContainerMetadata{ Metadata: &runtimeapi.ContainerMetadata{
Name: &name, Name: &name,
Attempt: &attempt, Attempt: &attempt,
}, },
Image: &runtimeApi.ImageSpec{Image: &image}, Image: &runtimeapi.ImageSpec{Image: &image},
Labels: labels, Labels: labels,
Annotations: annotations, Annotations: annotations,
} }
@@ -48,8 +48,8 @@ func TestListContainers(t *testing.T) {
podName, namespace := "foo", "bar" podName, namespace := "foo", "bar"
containerName, image := "sidecar", "logger" containerName, image := "sidecar", "logger"
configs := []*runtimeApi.ContainerConfig{} configs := []*runtimeapi.ContainerConfig{}
sConfigs := []*runtimeApi.PodSandboxConfig{} sConfigs := []*runtimeapi.PodSandboxConfig{}
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i), s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0) fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0)
@@ -61,8 +61,8 @@ func TestListContainers(t *testing.T) {
configs = append(configs, c) configs = append(configs, c)
} }
expected := []*runtimeApi.Container{} expected := []*runtimeapi.Container{}
state := runtimeApi.ContainerState_CONTAINER_RUNNING state := runtimeapi.ContainerState_CONTAINER_RUNNING
var createdAt int64 = 0 var createdAt int64 = 0
for i := range configs { for i := range configs {
// We don't care about the sandbox id; pass a bogus one. // We don't care about the sandbox id; pass a bogus one.
@@ -75,7 +75,7 @@ func TestListContainers(t *testing.T) {
imageRef := "" // FakeDockerClient doesn't populate ImageRef yet. imageRef := "" // FakeDockerClient doesn't populate ImageRef yet.
// Prepend to the expected list because ListContainers returns // Prepend to the expected list because ListContainers returns
// the most recent containers first. // the most recent containers first.
expected = append([]*runtimeApi.Container{{ expected = append([]*runtimeapi.Container{{
Metadata: configs[i].Metadata, Metadata: configs[i].Metadata,
Id: &id, Id: &id,
PodSandboxId: &sandboxID, PodSandboxId: &sandboxID,
@@ -105,13 +105,13 @@ func TestContainerStatus(t *testing.T) {
var defaultTime time.Time var defaultTime time.Time
dt := defaultTime.UnixNano() dt := defaultTime.UnixNano()
ct, st, ft := dt, dt, dt ct, st, ft := dt, dt, dt
state := runtimeApi.ContainerState_CONTAINER_CREATED state := runtimeapi.ContainerState_CONTAINER_CREATED
// The following variables are not set in FakeDockerClient. // The following variables are not set in FakeDockerClient.
imageRef := DockerImageIDPrefix + "" imageRef := DockerImageIDPrefix + ""
exitCode := int32(0) exitCode := int32(0)
var reason, message string var reason, message string
expected := &runtimeApi.ContainerStatus{ expected := &runtimeapi.ContainerStatus{
State: &state, State: &state,
CreatedAt: &ct, CreatedAt: &ct,
StartedAt: &st, StartedAt: &st,
@@ -122,7 +122,7 @@ func TestContainerStatus(t *testing.T) {
ExitCode: &exitCode, ExitCode: &exitCode,
Reason: &reason, Reason: &reason,
Message: &message, Message: &message,
Mounts: []*runtimeApi.Mount{}, Mounts: []*runtimeapi.Mount{},
Labels: config.Labels, Labels: config.Labels,
Annotations: config.Annotations, Annotations: config.Annotations,
} }
@@ -149,7 +149,7 @@ func TestContainerStatus(t *testing.T) {
// Advance the clock and start the container. // Advance the clock and start the container.
fClock.SetTime(time.Now()) fClock.SetTime(time.Now())
*expected.StartedAt = fClock.Now().UnixNano() *expected.StartedAt = fClock.Now().UnixNano()
*expected.State = runtimeApi.ContainerState_CONTAINER_RUNNING *expected.State = runtimeapi.ContainerState_CONTAINER_RUNNING
err = ds.StartContainer(id) err = ds.StartContainer(id)
assert.NoError(t, err) assert.NoError(t, err)
@@ -159,7 +159,7 @@ func TestContainerStatus(t *testing.T) {
// Advance the clock and stop the container. // Advance the clock and stop the container.
fClock.SetTime(time.Now().Add(1 * time.Hour)) fClock.SetTime(time.Now().Add(1 * time.Hour))
*expected.FinishedAt = fClock.Now().UnixNano() *expected.FinishedAt = fClock.Now().UnixNano()
*expected.State = runtimeApi.ContainerState_CONTAINER_EXITED *expected.State = runtimeapi.ContainerState_CONTAINER_EXITED
*expected.Reason = "Completed" *expected.Reason = "Completed"
err = ds.StopContainer(id, 0) err = ds.StopContainer(id, 0)

View File

@@ -18,14 +18,14 @@ package dockershim
import ( import (
dockertypes "github.com/docker/engine-api/types" dockertypes "github.com/docker/engine-api/types"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"
) )
// This file implements methods in ImageManagerService. // This file implements methods in ImageManagerService.
// ListImages lists existing images. // ListImages lists existing images.
func (ds *dockerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) { func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
opts := dockertypes.ImageListOptions{} opts := dockertypes.ImageListOptions{}
if filter != nil { if filter != nil {
if imgSpec := filter.GetImage(); imgSpec != nil { if imgSpec := filter.GetImage(); imgSpec != nil {
@@ -38,7 +38,7 @@ func (ds *dockerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeA
return nil, err return nil, err
} }
result := []*runtimeApi.Image{} result := []*runtimeapi.Image{}
for _, i := range images { for _, i := range images {
apiImage, err := imageToRuntimeAPIImage(&i) apiImage, err := imageToRuntimeAPIImage(&i)
if err != nil { if err != nil {
@@ -51,7 +51,7 @@ func (ds *dockerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeA
} }
// ImageStatus returns the status of the image, returns nil if the image doesn't present. // ImageStatus returns the status of the image, returns nil if the image doesn't present.
func (ds *dockerService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) { func (ds *dockerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
imageInspect, err := ds.client.InspectImageByRef(image.GetImage()) imageInspect, err := ds.client.InspectImageByRef(image.GetImage())
if err != nil { if err != nil {
if dockertools.IsImageNotFoundError(err) { if dockertools.IsImageNotFoundError(err) {
@@ -63,7 +63,7 @@ func (ds *dockerService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.I
} }
// PullImage pulls an image with authentication config. // PullImage pulls an image with authentication config.
func (ds *dockerService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error { func (ds *dockerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
return ds.client.PullImage(image.GetImage(), return ds.client.PullImage(image.GetImage(),
dockertypes.AuthConfig{ dockertypes.AuthConfig{
Username: auth.GetUsername(), Username: auth.GetUsername(),
@@ -77,7 +77,7 @@ func (ds *dockerService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi
} }
// RemoveImage removes the image. // RemoveImage removes the image.
func (ds *dockerService) RemoveImage(image *runtimeApi.ImageSpec) error { func (ds *dockerService) RemoveImage(image *runtimeapi.ImageSpec) error {
// If the image has multiple tags, we need to remove all the tags // If the image has multiple tags, we need to remove all the tags
// TODO: We assume image.Image is image ID here, which is true in the current implementation // TODO: We assume image.Image is image ID here, which is true in the current implementation
// of kubelet, but we should still clarify this in CRI. // of kubelet, but we should still clarify this in CRI.

View File

@@ -21,7 +21,7 @@ import (
dockertypes "github.com/docker/engine-api/types" dockertypes "github.com/docker/engine-api/types"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"
) )
@@ -29,7 +29,7 @@ func TestRemoveImage(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService() ds, fakeDocker, _ := newTestDockerService()
id := "1111" id := "1111"
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo"}} fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo"}}
ds.RemoveImage(&runtimeApi.ImageSpec{Image: &id}) ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil), fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
dockertools.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}})) dockertools.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}}))
} }
@@ -38,7 +38,7 @@ func TestRemoveImageWithMultipleTags(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService() ds, fakeDocker, _ := newTestDockerService()
id := "1111" id := "1111"
fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo", "bar"}} fakeDocker.Image = &dockertypes.ImageInspect{ID: id, RepoTags: []string{"foo", "bar"}}
ds.RemoveImage(&runtimeApi.ImageSpec{Image: &id}) ds.RemoveImage(&runtimeapi.ImageSpec{Image: &id})
fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil), fakeDocker.AssertCallDetails(dockertools.NewCalledDetail("inspect_image", nil),
dockertools.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}), dockertools.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
dockertools.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}})) dockertools.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}}))

View File

@@ -24,7 +24,7 @@ import (
dockerfilters "github.com/docker/engine-api/types/filters" dockerfilters "github.com/docker/engine-api/types/filters"
"github.com/golang/glog" "github.com/golang/glog"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/types"
@@ -48,7 +48,7 @@ const (
// For docker, PodSandbox is implemented by a container holding the network // For docker, PodSandbox is implemented by a container holding the network
// namespace for the pod. // namespace for the pod.
// Note: docker doesn't use LogDirectory (yet). // Note: docker doesn't use LogDirectory (yet).
func (ds *dockerService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) { func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
// Step 1: Pull the image for the sandbox. // Step 1: Pull the image for the sandbox.
image := defaultSandboxImage image := defaultSandboxImage
podSandboxImage := ds.podSandboxImage podSandboxImage := ds.podSandboxImage
@@ -179,7 +179,7 @@ func (ds *dockerService) getIP(sandbox *dockertypes.ContainerJSON) (string, erro
} }
// PodSandboxStatus returns the status of the PodSandbox. // PodSandboxStatus returns the status of the PodSandbox.
func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error) { func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
// Inspect the container. // Inspect the container.
r, err := ds.client.InspectContainer(podSandboxID) r, err := ds.client.InspectContainer(podSandboxID)
if err != nil { if err != nil {
@@ -194,15 +194,15 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
ct := createdAt.UnixNano() ct := createdAt.UnixNano()
// Translate container to sandbox state. // Translate container to sandbox state.
state := runtimeApi.PodSandboxState_SANDBOX_NOTREADY state := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
if r.State.Running { if r.State.Running {
state = runtimeApi.PodSandboxState_SANDBOX_READY state = runtimeapi.PodSandboxState_SANDBOX_READY
} }
IP, err := ds.getIP(r) IP, err := ds.getIP(r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
network := &runtimeApi.PodSandboxNetworkStatus{Ip: &IP} network := &runtimeapi.PodSandboxNetworkStatus{Ip: &IP}
netNS := getNetworkNamespace(r) netNS := getNetworkNamespace(r)
metadata, err := parseSandboxName(r.Name) metadata, err := parseSandboxName(r.Name)
@@ -211,7 +211,7 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
} }
hostNetwork := sharesHostNetwork(r) hostNetwork := sharesHostNetwork(r)
labels, annotations := extractLabels(r.Config.Labels) labels, annotations := extractLabels(r.Config.Labels)
return &runtimeApi.PodSandboxStatus{ return &runtimeapi.PodSandboxStatus{
Id: &r.ID, Id: &r.ID,
State: &state, State: &state,
CreatedAt: &ct, CreatedAt: &ct,
@@ -219,10 +219,10 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
Labels: labels, Labels: labels,
Annotations: annotations, Annotations: annotations,
Network: network, Network: network,
Linux: &runtimeApi.LinuxPodSandboxStatus{ Linux: &runtimeapi.LinuxPodSandboxStatus{
Namespaces: &runtimeApi.Namespace{ Namespaces: &runtimeapi.Namespace{
Network: &netNS, Network: &netNS,
Options: &runtimeApi.NamespaceOption{ Options: &runtimeapi.NamespaceOption{
HostNetwork: &hostNetwork, HostNetwork: &hostNetwork,
}, },
}, },
@@ -231,7 +231,7 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodS
} }
// ListPodSandbox returns a list of Sandbox. // ListPodSandbox returns a list of Sandbox.
func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) { func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
// By default, list all containers whether they are running or not. // By default, list all containers whether they are running or not.
opts := dockertypes.ContainerListOptions{All: true} opts := dockertypes.ContainerListOptions{All: true}
filterOutReadySandboxes := false filterOutReadySandboxes := false
@@ -246,11 +246,11 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
f.Add("id", filter.GetId()) f.Add("id", filter.GetId())
} }
if filter.State != nil { if filter.State != nil {
if filter.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY { if filter.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
// Only list running containers. // Only list running containers.
opts.All = false opts.All = false
} else { } else {
// runtimeApi.PodSandboxState_SANDBOX_NOTREADY can mean the // runtimeapi.PodSandboxState_SANDBOX_NOTREADY can mean the
// container is in any of the non-running state (e.g., created, // container is in any of the non-running state (e.g., created,
// exited). We can't tell docker to filter out running // exited). We can't tell docker to filter out running
// containers directly, so we'll need to filter them out // containers directly, so we'll need to filter them out
@@ -271,7 +271,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
} }
// Convert docker containers to runtime api sandboxes. // Convert docker containers to runtime api sandboxes.
result := []*runtimeApi.PodSandbox{} result := []*runtimeapi.PodSandbox{}
for i := range containers { for i := range containers {
c := containers[i] c := containers[i]
converted, err := toRuntimeAPISandbox(&c) converted, err := toRuntimeAPISandbox(&c)
@@ -279,7 +279,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
glog.V(4).Infof("Unable to convert docker to runtime API sandbox: %v", err) glog.V(4).Infof("Unable to convert docker to runtime API sandbox: %v", err)
continue continue
} }
if filterOutReadySandboxes && converted.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY { if filterOutReadySandboxes && converted.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
continue continue
} }
@@ -289,7 +289,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]
} }
// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig. // applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeApi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error { func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error {
// Apply Cgroup options. // Apply Cgroup options.
// TODO: Check if this works with per-pod cgroups. // TODO: Check if this works with per-pod cgroups.
hc.CgroupParent = lc.GetCgroupParent() hc.CgroupParent = lc.GetCgroupParent()
@@ -299,8 +299,8 @@ func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig
return nil return nil
} }
// makeSandboxDockerConfig returns dockertypes.ContainerCreateConfig based on runtimeApi.PodSandboxConfig. // makeSandboxDockerConfig returns dockertypes.ContainerCreateConfig based on runtimeapi.PodSandboxConfig.
func (ds *dockerService) makeSandboxDockerConfig(c *runtimeApi.PodSandboxConfig, image string) (*dockertypes.ContainerCreateConfig, error) { func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, image string) (*dockertypes.ContainerCreateConfig, error) {
// Merge annotations and labels because docker supports only labels. // Merge annotations and labels because docker supports only labels.
labels := makeLabels(c.GetLabels(), c.GetAnnotations()) labels := makeLabels(c.GetLabels(), c.GetAnnotations())
// Apply a label to distinguish sandboxes from regular containers. // Apply a label to distinguish sandboxes from regular containers.

View File

@@ -24,19 +24,19 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/types"
) )
// A helper to create a basic config. // A helper to create a basic config.
func makeSandboxConfig(name, namespace, uid string, attempt uint32) *runtimeApi.PodSandboxConfig { func makeSandboxConfig(name, namespace, uid string, attempt uint32) *runtimeapi.PodSandboxConfig {
return makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid, attempt, map[string]string{}, map[string]string{}) return makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid, attempt, map[string]string{}, map[string]string{})
} }
func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, attempt uint32, labels, annotations map[string]string) *runtimeApi.PodSandboxConfig { func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, attempt uint32, labels, annotations map[string]string) *runtimeapi.PodSandboxConfig {
return &runtimeApi.PodSandboxConfig{ return &runtimeapi.PodSandboxConfig{
Metadata: &runtimeApi.PodSandboxMetadata{ Metadata: &runtimeapi.PodSandboxMetadata{
Name: &name, Name: &name,
Namespace: &namespace, Namespace: &namespace,
Uid: &uid, Uid: &uid,
@@ -52,7 +52,7 @@ func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, atte
func TestListSandboxes(t *testing.T) { func TestListSandboxes(t *testing.T) {
ds, _, _ := newTestDockerService() ds, _, _ := newTestDockerService()
name, namespace := "foo", "bar" name, namespace := "foo", "bar"
configs := []*runtimeApi.PodSandboxConfig{} configs := []*runtimeapi.PodSandboxConfig{}
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
c := makeSandboxConfigWithLabelsAndAnnotations(fmt.Sprintf("%s%d", name, i), c := makeSandboxConfigWithLabelsAndAnnotations(fmt.Sprintf("%s%d", name, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0, fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0,
@@ -62,15 +62,15 @@ func TestListSandboxes(t *testing.T) {
configs = append(configs, c) configs = append(configs, c)
} }
expected := []*runtimeApi.PodSandbox{} expected := []*runtimeapi.PodSandbox{}
state := runtimeApi.PodSandboxState_SANDBOX_READY state := runtimeapi.PodSandboxState_SANDBOX_READY
var createdAt int64 = 0 var createdAt int64 = 0
for i := range configs { for i := range configs {
id, err := ds.RunPodSandbox(configs[i]) id, err := ds.RunPodSandbox(configs[i])
assert.NoError(t, err) assert.NoError(t, err)
// Prepend to the expected list because ListPodSandbox returns // Prepend to the expected list because ListPodSandbox returns
// the most recent sandbox first. // the most recent sandbox first.
expected = append([]*runtimeApi.PodSandbox{{ expected = append([]*runtimeapi.PodSandbox{{
Metadata: configs[i].Metadata, Metadata: configs[i].Metadata,
Id: &id, Id: &id,
State: &state, State: &state,
@@ -98,15 +98,15 @@ func TestSandboxStatus(t *testing.T) {
fakeIP := "2.3.4.5" fakeIP := "2.3.4.5"
fakeNS := fmt.Sprintf("/proc/%d/ns/net", os.Getpid()) fakeNS := fmt.Sprintf("/proc/%d/ns/net", os.Getpid())
state := runtimeApi.PodSandboxState_SANDBOX_READY state := runtimeapi.PodSandboxState_SANDBOX_READY
ct := int64(0) ct := int64(0)
hostNetwork := false hostNetwork := false
expected := &runtimeApi.PodSandboxStatus{ expected := &runtimeapi.PodSandboxStatus{
State: &state, State: &state,
CreatedAt: &ct, CreatedAt: &ct,
Metadata: config.Metadata, Metadata: config.Metadata,
Network: &runtimeApi.PodSandboxNetworkStatus{Ip: &fakeIP}, Network: &runtimeapi.PodSandboxNetworkStatus{Ip: &fakeIP},
Linux: &runtimeApi.LinuxPodSandboxStatus{Namespaces: &runtimeApi.Namespace{Network: &fakeNS, Options: &runtimeApi.NamespaceOption{HostNetwork: &hostNetwork}}}, Linux: &runtimeapi.LinuxPodSandboxStatus{Namespaces: &runtimeapi.Namespace{Network: &fakeNS, Options: &runtimeapi.NamespaceOption{HostNetwork: &hostNetwork}}},
Labels: labels, Labels: labels,
Annotations: annotations, Annotations: annotations,
} }
@@ -128,7 +128,7 @@ func TestSandboxStatus(t *testing.T) {
assert.Equal(t, expected, status) assert.Equal(t, expected, status)
// Stop the sandbox. // Stop the sandbox.
*expected.State = runtimeApi.PodSandboxState_SANDBOX_NOTREADY *expected.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
err = ds.StopPodSandbox(id) err = ds.StopPodSandbox(id)
assert.NoError(t, err) assert.NoError(t, err)
status, err = ds.PodSandboxStatus(id) status, err = ds.PodSandboxStatus(id)
@@ -186,9 +186,9 @@ func TestHostNetworkPluginInvocation(t *testing.T) {
map[string]string{"annotation": ns}, map[string]string{"annotation": ns},
) )
hostNetwork := true hostNetwork := true
c.Linux = &runtimeApi.LinuxPodSandboxConfig{ c.Linux = &runtimeapi.LinuxPodSandboxConfig{
SecurityContext: &runtimeApi.LinuxSandboxSecurityContext{ SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeApi.NamespaceOption{ NamespaceOptions: &runtimeapi.NamespaceOption{
HostNetwork: &hostNetwork, HostNetwork: &hostNetwork,
}, },
}, },

View File

@@ -24,8 +24,8 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/apis/componentconfig"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/cm" "k8s.io/kubernetes/pkg/kubelet/dockershim/cm"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"
@@ -141,8 +141,8 @@ func NewDockerService(client dockertools.DockerInterface, seccompProfileRoot str
// DockerService is an interface that embeds the new RuntimeService and // DockerService is an interface that embeds the new RuntimeService and
// ImageService interfaces. // ImageService interfaces.
type DockerService interface { type DockerService interface {
internalApi.RuntimeService internalapi.RuntimeService
internalApi.ImageManagerService internalapi.ImageManagerService
Start() error Start() error
// For serving streaming calls. // For serving streaming calls.
http.Handler http.Handler
@@ -160,7 +160,7 @@ type dockerService struct {
} }
// Version returns the runtime name, runtime version and runtime API version // Version returns the runtime name, runtime version and runtime API version
func (ds *dockerService) Version(_ string) (*runtimeApi.VersionResponse, error) { func (ds *dockerService) Version(_ string) (*runtimeapi.VersionResponse, error) {
v, err := ds.client.Version() v, err := ds.client.Version()
if err != nil { if err != nil {
return nil, fmt.Errorf("docker: failed to get docker version: %v", err) return nil, fmt.Errorf("docker: failed to get docker version: %v", err)
@@ -170,7 +170,7 @@ func (ds *dockerService) Version(_ string) (*runtimeApi.VersionResponse, error)
// Docker API version (e.g., 1.23) is not semver compatible. Add a ".0" // Docker API version (e.g., 1.23) is not semver compatible. Add a ".0"
// suffix to remedy this. // suffix to remedy this.
apiVersion := fmt.Sprintf("%s.0", v.APIVersion) apiVersion := fmt.Sprintf("%s.0", v.APIVersion)
return &runtimeApi.VersionResponse{ return &runtimeapi.VersionResponse{
Version: &runtimeAPIVersion, Version: &runtimeAPIVersion,
RuntimeName: &name, RuntimeName: &name,
RuntimeVersion: &v.Version, RuntimeVersion: &v.Version,
@@ -179,7 +179,7 @@ func (ds *dockerService) Version(_ string) (*runtimeApi.VersionResponse, error)
} }
// UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates. // UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates.
func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) (err error) { func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) (err error) {
if runtimeConfig == nil { if runtimeConfig == nil {
return return
} }
@@ -224,16 +224,16 @@ func (ds *dockerService) Start() error {
// Status returns the status of the runtime. // Status returns the status of the runtime.
// TODO(random-liu): Set network condition accordingly here. // TODO(random-liu): Set network condition accordingly here.
func (ds *dockerService) Status() (*runtimeApi.RuntimeStatus, error) { func (ds *dockerService) Status() (*runtimeapi.RuntimeStatus, error) {
runtimeReady := &runtimeApi.RuntimeCondition{ runtimeReady := &runtimeapi.RuntimeCondition{
Type: proto.String(runtimeApi.RuntimeReady), Type: proto.String(runtimeapi.RuntimeReady),
Status: proto.Bool(true), Status: proto.Bool(true),
} }
networkReady := &runtimeApi.RuntimeCondition{ networkReady := &runtimeapi.RuntimeCondition{
Type: proto.String(runtimeApi.NetworkReady), Type: proto.String(runtimeapi.NetworkReady),
Status: proto.Bool(true), Status: proto.Bool(true),
} }
conditions := []*runtimeApi.RuntimeCondition{runtimeReady, networkReady} conditions := []*runtimeapi.RuntimeCondition{runtimeReady, networkReady}
if _, err := ds.client.Version(); err != nil { if _, err := ds.client.Version(); err != nil {
runtimeReady.Status = proto.Bool(false) runtimeReady.Status = proto.Bool(false)
runtimeReady.Reason = proto.String("DockerDaemonNotReady") runtimeReady.Reason = proto.String("DockerDaemonNotReady")
@@ -244,7 +244,7 @@ func (ds *dockerService) Status() (*runtimeApi.RuntimeStatus, error) {
networkReady.Reason = proto.String("NetworkPluginNotReady") networkReady.Reason = proto.String("NetworkPluginNotReady")
networkReady.Message = proto.String(fmt.Sprintf("docker: network plugin is not ready: %v", err)) networkReady.Message = proto.String(fmt.Sprintf("docker: network plugin is not ready: %v", err))
} }
return &runtimeApi.RuntimeStatus{Conditions: conditions}, nil return &runtimeapi.RuntimeStatus{Conditions: conditions}, nil
} }
func (ds *dockerService) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (ds *dockerService) ServeHTTP(w http.ResponseWriter, r *http.Request) {

View File

@@ -24,7 +24,7 @@ import (
"github.com/golang/mock/gomock" "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/network" "k8s.io/kubernetes/pkg/kubelet/network"
@@ -48,7 +48,7 @@ func newTestDockerService() (*dockerService, *dockertools.FakeDockerClient, *clo
func TestStatus(t *testing.T) { func TestStatus(t *testing.T) {
ds, fDocker, _ := newTestDockerService() ds, fDocker, _ := newTestDockerService()
assertStatus := func(expected map[string]bool, status *runtimeApi.RuntimeStatus) { assertStatus := func(expected map[string]bool, status *runtimeapi.RuntimeStatus) {
conditions := status.GetConditions() conditions := status.GetConditions()
assert.Equal(t, len(expected), len(conditions)) assert.Equal(t, len(expected), len(conditions))
for k, v := range expected { for k, v := range expected {
@@ -64,8 +64,8 @@ func TestStatus(t *testing.T) {
status, err := ds.Status() status, err := ds.Status()
assert.NoError(t, err) assert.NoError(t, err)
assertStatus(map[string]bool{ assertStatus(map[string]bool{
runtimeApi.RuntimeReady: true, runtimeapi.RuntimeReady: true,
runtimeApi.NetworkReady: true, runtimeapi.NetworkReady: true,
}, status) }, status)
// Should not report ready status if version returns error. // Should not report ready status if version returns error.
@@ -73,8 +73,8 @@ func TestStatus(t *testing.T) {
status, err = ds.Status() status, err = ds.Status()
assert.NoError(t, err) assert.NoError(t, err)
assertStatus(map[string]bool{ assertStatus(map[string]bool{
runtimeApi.RuntimeReady: false, runtimeapi.RuntimeReady: false,
runtimeApi.NetworkReady: true, runtimeapi.NetworkReady: true,
}, status) }, status)
// Should not report ready status is network plugin returns error. // Should not report ready status is network plugin returns error.
@@ -85,7 +85,7 @@ func TestStatus(t *testing.T) {
status, err = ds.Status() status, err = ds.Status()
assert.NoError(t, err) assert.NoError(t, err)
assertStatus(map[string]bool{ assertStatus(map[string]bool{
runtimeApi.RuntimeReady: true, runtimeapi.RuntimeReady: true,
runtimeApi.NetworkReady: false, runtimeapi.NetworkReady: false,
}, status) }, status)
} }

View File

@@ -29,7 +29,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/types"
) )
@@ -62,7 +62,7 @@ func (v apiVersion) Compare(other string) (int, error) {
// generateEnvList converts KeyValue list to a list of strings, in the form of // generateEnvList converts KeyValue list to a list of strings, in the form of
// '<key>=<value>', which can be understood by docker. // '<key>=<value>', which can be understood by docker.
func generateEnvList(envs []*runtimeApi.KeyValue) (result []string) { func generateEnvList(envs []*runtimeapi.KeyValue) (result []string) {
for _, env := range envs { for _, env := range envs {
result = append(result, fmt.Sprintf("%s=%s", env.GetKey(), env.GetValue())) result = append(result, fmt.Sprintf("%s=%s", env.GetKey(), env.GetValue()))
} }
@@ -127,7 +127,7 @@ func extractLabels(input map[string]string) (map[string]string, map[string]strin
// '<HostPath>:<ContainerPath>:ro', if the path is read only, or // '<HostPath>:<ContainerPath>:ro', if the path is read only, or
// '<HostPath>:<ContainerPath>:Z', if the volume requires SELinux // '<HostPath>:<ContainerPath>:Z', if the volume requires SELinux
// relabeling and the pod provides an SELinux label // relabeling and the pod provides an SELinux label
func generateMountBindings(mounts []*runtimeApi.Mount) (result []string) { func generateMountBindings(mounts []*runtimeapi.Mount) (result []string) {
for _, m := range mounts { for _, m := range mounts {
bind := fmt.Sprintf("%s:%s", m.GetHostPath(), m.GetContainerPath()) bind := fmt.Sprintf("%s:%s", m.GetHostPath(), m.GetContainerPath())
readOnly := m.GetReadonly() readOnly := m.GetReadonly()
@@ -150,7 +150,7 @@ func generateMountBindings(mounts []*runtimeApi.Mount) (result []string) {
return return
} }
func makePortsAndBindings(pm []*runtimeApi.PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) { func makePortsAndBindings(pm []*runtimeapi.PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) {
exposedPorts := map[dockernat.Port]struct{}{} exposedPorts := map[dockernat.Port]struct{}{}
portBindings := map[dockernat.Port][]dockernat.PortBinding{} portBindings := map[dockernat.Port][]dockernat.PortBinding{}
for _, port := range pm { for _, port := range pm {
@@ -198,7 +198,7 @@ func makePortsAndBindings(pm []*runtimeApi.PortMapping) (map[dockernat.Port]stru
// getContainerSecurityOpt gets container security options from container and sandbox config, currently from sandbox // getContainerSecurityOpt gets container security options from container and sandbox config, currently from sandbox
// annotations. // annotations.
// It is an experimental feature and may be promoted to official runtime api in the future. // It is an experimental feature and may be promoted to official runtime api in the future.
func getContainerSecurityOpts(containerName string, sandboxConfig *runtimeApi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) { func getContainerSecurityOpts(containerName string, sandboxConfig *runtimeapi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
appArmorOpts, err := dockertools.GetAppArmorOpts(sandboxConfig.GetAnnotations(), containerName) appArmorOpts, err := dockertools.GetAppArmorOpts(sandboxConfig.GetAnnotations(), containerName)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -216,7 +216,7 @@ func getContainerSecurityOpts(containerName string, sandboxConfig *runtimeApi.Po
return opts, nil return opts, nil
} }
func getSandboxSecurityOpts(sandboxConfig *runtimeApi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) { func getSandboxSecurityOpts(sandboxConfig *runtimeapi.PodSandboxConfig, seccompProfileRoot string) ([]string, error) {
// sandboxContainerName doesn't exist in the pod, so pod security options will be returned by default. // sandboxContainerName doesn't exist in the pod, so pod security options will be returned by default.
return getContainerSecurityOpts(sandboxContainerName, sandboxConfig, seccompProfileRoot) return getContainerSecurityOpts(sandboxContainerName, sandboxConfig, seccompProfileRoot)
} }

View File

@@ -23,7 +23,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/pkg/security/apparmor"
) )
@@ -43,13 +43,13 @@ func TestLabelsAndAnnotationsRoundTrip(t *testing.T) {
// TODO: Migrate the corresponding test to dockershim. // TODO: Migrate the corresponding test to dockershim.
func TestGetContainerSecurityOpts(t *testing.T) { func TestGetContainerSecurityOpts(t *testing.T) {
containerName := "bar" containerName := "bar"
makeConfig := func(annotations map[string]string) *runtimeApi.PodSandboxConfig { makeConfig := func(annotations map[string]string) *runtimeapi.PodSandboxConfig {
return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations) return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations)
} }
tests := []struct { tests := []struct {
msg string msg string
config *runtimeApi.PodSandboxConfig config *runtimeapi.PodSandboxConfig
expectedOpts []string expectedOpts []string
}{{ }{{
msg: "No security annotations", msg: "No security annotations",
@@ -106,13 +106,13 @@ func TestGetContainerSecurityOpts(t *testing.T) {
// TestGetSandboxSecurityOpts tests the logic of generating sandbox security options from sandbox annotations. // TestGetSandboxSecurityOpts tests the logic of generating sandbox security options from sandbox annotations.
func TestGetSandboxSecurityOpts(t *testing.T) { func TestGetSandboxSecurityOpts(t *testing.T) {
makeConfig := func(annotations map[string]string) *runtimeApi.PodSandboxConfig { makeConfig := func(annotations map[string]string) *runtimeapi.PodSandboxConfig {
return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations) return makeSandboxConfigWithLabelsAndAnnotations("pod", "ns", "1234", 1, nil, annotations)
} }
tests := []struct { tests := []struct {
msg string msg string
config *runtimeApi.PodSandboxConfig config *runtimeapi.PodSandboxConfig
expectedOpts []string expectedOpts []string
}{{ }{{
msg: "No security annotations", msg: "No security annotations",

View File

@@ -21,7 +21,7 @@ import (
"strconv" "strconv"
"strings" "strings"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/leaky" "k8s.io/kubernetes/pkg/kubelet/leaky"
) )
@@ -55,7 +55,7 @@ const (
DockerPullableImageIDPrefix = dockertools.DockerPullablePrefix DockerPullableImageIDPrefix = dockertools.DockerPullablePrefix
) )
func makeSandboxName(s *runtimeApi.PodSandboxConfig) string { func makeSandboxName(s *runtimeapi.PodSandboxConfig) string {
return strings.Join([]string{ return strings.Join([]string{
kubePrefix, // 0 kubePrefix, // 0
sandboxContainerName, // 1 sandboxContainerName, // 1
@@ -66,7 +66,7 @@ func makeSandboxName(s *runtimeApi.PodSandboxConfig) string {
}, nameDelimiter) }, nameDelimiter)
} }
func makeContainerName(s *runtimeApi.PodSandboxConfig, c *runtimeApi.ContainerConfig) string { func makeContainerName(s *runtimeapi.PodSandboxConfig, c *runtimeapi.ContainerConfig) string {
return strings.Join([]string{ return strings.Join([]string{
kubePrefix, // 0 kubePrefix, // 0
c.Metadata.GetName(), // 1: c.Metadata.GetName(), // 1:
@@ -87,7 +87,7 @@ func parseUint32(s string) (uint32, error) {
} }
// TODO: Evaluate whether we should rely on labels completely. // TODO: Evaluate whether we should rely on labels completely.
func parseSandboxName(name string) (*runtimeApi.PodSandboxMetadata, error) { func parseSandboxName(name string) (*runtimeapi.PodSandboxMetadata, error) {
// Docker adds a "/" prefix to names. so trim it. // Docker adds a "/" prefix to names. so trim it.
name = strings.TrimPrefix(name, "/") name = strings.TrimPrefix(name, "/")
@@ -104,7 +104,7 @@ func parseSandboxName(name string) (*runtimeApi.PodSandboxMetadata, error) {
return nil, fmt.Errorf("failed to parse the sandbox name %q: %v", name, err) return nil, fmt.Errorf("failed to parse the sandbox name %q: %v", name, err)
} }
return &runtimeApi.PodSandboxMetadata{ return &runtimeapi.PodSandboxMetadata{
Name: &parts[2], Name: &parts[2],
Namespace: &parts[3], Namespace: &parts[3],
Uid: &parts[4], Uid: &parts[4],
@@ -113,7 +113,7 @@ func parseSandboxName(name string) (*runtimeApi.PodSandboxMetadata, error) {
} }
// TODO: Evaluate whether we should rely on labels completely. // TODO: Evaluate whether we should rely on labels completely.
func parseContainerName(name string) (*runtimeApi.ContainerMetadata, error) { func parseContainerName(name string) (*runtimeapi.ContainerMetadata, error) {
// Docker adds a "/" prefix to names. so trim it. // Docker adds a "/" prefix to names. so trim it.
name = strings.TrimPrefix(name, "/") name = strings.TrimPrefix(name, "/")
@@ -130,7 +130,7 @@ func parseContainerName(name string) (*runtimeApi.ContainerMetadata, error) {
return nil, fmt.Errorf("failed to parse the container name %q: %v", name, err) return nil, fmt.Errorf("failed to parse the container name %q: %v", name, err)
} }
return &runtimeApi.ContainerMetadata{ return &runtimeapi.ContainerMetadata{
Name: &parts[1], Name: &parts[1],
Attempt: &attempt, Attempt: &attempt,
}, nil }, nil

View File

@@ -21,7 +21,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
func TestSandboxNameRoundTrip(t *testing.T) { func TestSandboxNameRoundTrip(t *testing.T) {
@@ -53,8 +53,8 @@ func TestNonParsableSandboxNames(t *testing.T) {
func TestContainerNameRoundTrip(t *testing.T) { func TestContainerNameRoundTrip(t *testing.T) {
sConfig := makeSandboxConfig("foo", "bar", "iamuid", 3) sConfig := makeSandboxConfig("foo", "bar", "iamuid", 3)
name, attempt := "pause", uint32(5) name, attempt := "pause", uint32(5)
config := &runtimeApi.ContainerConfig{ config := &runtimeapi.ContainerConfig{
Metadata: &runtimeApi.ContainerMetadata{ Metadata: &runtimeapi.ContainerMetadata{
Name: &name, Name: &name,
Attempt: &attempt, Attempt: &attempt,
}, },

View File

@@ -25,7 +25,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"google.golang.org/grpc" "google.golang.org/grpc"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockershim" "k8s.io/kubernetes/pkg/kubelet/dockershim"
"k8s.io/kubernetes/pkg/util/interrupt" "k8s.io/kubernetes/pkg/util/interrupt"
) )
@@ -69,8 +69,8 @@ func (s *DockerServer) Start() error {
} }
// Create the grpc server and register runtime and image services. // Create the grpc server and register runtime and image services.
s.server = grpc.NewServer() s.server = grpc.NewServer()
runtimeApi.RegisterRuntimeServiceServer(s.server, s.service) runtimeapi.RegisterRuntimeServiceServer(s.server, s.service)
runtimeApi.RegisterImageServiceServer(s.server, s.service) runtimeapi.RegisterImageServiceServer(s.server, s.service)
go func() { go func() {
// Use interrupt handler to make sure the server to be stopped properly. // Use interrupt handler to make sure the server to be stopped properly.
h := interrupt.New(nil, s.Stop) h := interrupt.New(nil, s.Stop)

View File

@@ -21,16 +21,16 @@ import (
"golang.org/x/net/context" "golang.org/x/net/context"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockershim" "k8s.io/kubernetes/pkg/kubelet/dockershim"
utilexec "k8s.io/kubernetes/pkg/util/exec" utilexec "k8s.io/kubernetes/pkg/util/exec"
) )
// DockerService is the interface implement CRI remote service server. // DockerService is the interface implement CRI remote service server.
type DockerService interface { type DockerService interface {
runtimeApi.RuntimeServiceServer runtimeapi.RuntimeServiceServer
runtimeApi.ImageServiceServer runtimeapi.ImageServiceServer
} }
// dockerService uses dockershim service to implement DockerService. // dockerService uses dockershim service to implement DockerService.
@@ -38,115 +38,115 @@ type DockerService interface {
// TODO(random-liu): Change the dockershim service to support context, and implement // TODO(random-liu): Change the dockershim service to support context, and implement
// internal services and remote services with the dockershim service. // internal services and remote services with the dockershim service.
type dockerService struct { type dockerService struct {
runtimeService internalApi.RuntimeService runtimeService internalapi.RuntimeService
imageService internalApi.ImageManagerService imageService internalapi.ImageManagerService
} }
func NewDockerService(s dockershim.DockerService) DockerService { func NewDockerService(s dockershim.DockerService) DockerService {
return &dockerService{runtimeService: s, imageService: s} return &dockerService{runtimeService: s, imageService: s}
} }
func (d *dockerService) Version(ctx context.Context, r *runtimeApi.VersionRequest) (*runtimeApi.VersionResponse, error) { func (d *dockerService) Version(ctx context.Context, r *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
return d.runtimeService.Version(r.GetVersion()) return d.runtimeService.Version(r.GetVersion())
} }
func (d *dockerService) Status(ctx context.Context, r *runtimeApi.StatusRequest) (*runtimeApi.StatusResponse, error) { func (d *dockerService) Status(ctx context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
status, err := d.runtimeService.Status() status, err := d.runtimeService.Status()
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.StatusResponse{Status: status}, nil return &runtimeapi.StatusResponse{Status: status}, nil
} }
func (d *dockerService) RunPodSandbox(ctx context.Context, r *runtimeApi.RunPodSandboxRequest) (*runtimeApi.RunPodSandboxResponse, error) { func (d *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodSandboxRequest) (*runtimeapi.RunPodSandboxResponse, error) {
podSandboxId, err := d.runtimeService.RunPodSandbox(r.GetConfig()) podSandboxId, err := d.runtimeService.RunPodSandbox(r.GetConfig())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.RunPodSandboxResponse{PodSandboxId: &podSandboxId}, nil return &runtimeapi.RunPodSandboxResponse{PodSandboxId: &podSandboxId}, nil
} }
func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeApi.StopPodSandboxRequest) (*runtimeApi.StopPodSandboxResponse, error) { func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
err := d.runtimeService.StopPodSandbox(r.GetPodSandboxId()) err := d.runtimeService.StopPodSandbox(r.GetPodSandboxId())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.StopPodSandboxResponse{}, nil return &runtimeapi.StopPodSandboxResponse{}, nil
} }
func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeApi.RemovePodSandboxRequest) (*runtimeApi.RemovePodSandboxResponse, error) { func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
err := d.runtimeService.RemovePodSandbox(r.GetPodSandboxId()) err := d.runtimeService.RemovePodSandbox(r.GetPodSandboxId())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.RemovePodSandboxResponse{}, nil return &runtimeapi.RemovePodSandboxResponse{}, nil
} }
func (d *dockerService) PodSandboxStatus(ctx context.Context, r *runtimeApi.PodSandboxStatusRequest) (*runtimeApi.PodSandboxStatusResponse, error) { func (d *dockerService) PodSandboxStatus(ctx context.Context, r *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
podSandboxStatus, err := d.runtimeService.PodSandboxStatus(r.GetPodSandboxId()) podSandboxStatus, err := d.runtimeService.PodSandboxStatus(r.GetPodSandboxId())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.PodSandboxStatusResponse{Status: podSandboxStatus}, nil return &runtimeapi.PodSandboxStatusResponse{Status: podSandboxStatus}, nil
} }
func (d *dockerService) ListPodSandbox(ctx context.Context, r *runtimeApi.ListPodSandboxRequest) (*runtimeApi.ListPodSandboxResponse, error) { func (d *dockerService) ListPodSandbox(ctx context.Context, r *runtimeapi.ListPodSandboxRequest) (*runtimeapi.ListPodSandboxResponse, error) {
items, err := d.runtimeService.ListPodSandbox(r.GetFilter()) items, err := d.runtimeService.ListPodSandbox(r.GetFilter())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.ListPodSandboxResponse{Items: items}, nil return &runtimeapi.ListPodSandboxResponse{Items: items}, nil
} }
func (d *dockerService) CreateContainer(ctx context.Context, r *runtimeApi.CreateContainerRequest) (*runtimeApi.CreateContainerResponse, error) { func (d *dockerService) CreateContainer(ctx context.Context, r *runtimeapi.CreateContainerRequest) (*runtimeapi.CreateContainerResponse, error) {
containerId, err := d.runtimeService.CreateContainer(r.GetPodSandboxId(), r.GetConfig(), r.GetSandboxConfig()) containerId, err := d.runtimeService.CreateContainer(r.GetPodSandboxId(), r.GetConfig(), r.GetSandboxConfig())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.CreateContainerResponse{ContainerId: &containerId}, nil return &runtimeapi.CreateContainerResponse{ContainerId: &containerId}, nil
} }
func (d *dockerService) StartContainer(ctx context.Context, r *runtimeApi.StartContainerRequest) (*runtimeApi.StartContainerResponse, error) { func (d *dockerService) StartContainer(ctx context.Context, r *runtimeapi.StartContainerRequest) (*runtimeapi.StartContainerResponse, error) {
err := d.runtimeService.StartContainer(r.GetContainerId()) err := d.runtimeService.StartContainer(r.GetContainerId())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.StartContainerResponse{}, nil return &runtimeapi.StartContainerResponse{}, nil
} }
func (d *dockerService) StopContainer(ctx context.Context, r *runtimeApi.StopContainerRequest) (*runtimeApi.StopContainerResponse, error) { func (d *dockerService) StopContainer(ctx context.Context, r *runtimeapi.StopContainerRequest) (*runtimeapi.StopContainerResponse, error) {
err := d.runtimeService.StopContainer(r.GetContainerId(), r.GetTimeout()) err := d.runtimeService.StopContainer(r.GetContainerId(), r.GetTimeout())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.StopContainerResponse{}, nil return &runtimeapi.StopContainerResponse{}, nil
} }
func (d *dockerService) RemoveContainer(ctx context.Context, r *runtimeApi.RemoveContainerRequest) (*runtimeApi.RemoveContainerResponse, error) { func (d *dockerService) RemoveContainer(ctx context.Context, r *runtimeapi.RemoveContainerRequest) (*runtimeapi.RemoveContainerResponse, error) {
err := d.runtimeService.RemoveContainer(r.GetContainerId()) err := d.runtimeService.RemoveContainer(r.GetContainerId())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.RemoveContainerResponse{}, nil return &runtimeapi.RemoveContainerResponse{}, nil
} }
func (d *dockerService) ListContainers(ctx context.Context, r *runtimeApi.ListContainersRequest) (*runtimeApi.ListContainersResponse, error) { func (d *dockerService) ListContainers(ctx context.Context, r *runtimeapi.ListContainersRequest) (*runtimeapi.ListContainersResponse, error) {
containers, err := d.runtimeService.ListContainers(r.GetFilter()) containers, err := d.runtimeService.ListContainers(r.GetFilter())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.ListContainersResponse{Containers: containers}, nil return &runtimeapi.ListContainersResponse{Containers: containers}, nil
} }
func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeApi.ContainerStatusRequest) (*runtimeApi.ContainerStatusResponse, error) { func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeapi.ContainerStatusRequest) (*runtimeapi.ContainerStatusResponse, error) {
status, err := d.runtimeService.ContainerStatus(r.GetContainerId()) status, err := d.runtimeService.ContainerStatus(r.GetContainerId())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.ContainerStatusResponse{Status: status}, nil return &runtimeapi.ContainerStatusResponse{Status: status}, nil
} }
func (d *dockerService) ExecSync(ctx context.Context, r *runtimeApi.ExecSyncRequest) (*runtimeApi.ExecSyncResponse, error) { func (d *dockerService) ExecSync(ctx context.Context, r *runtimeapi.ExecSyncRequest) (*runtimeapi.ExecSyncResponse, error) {
stdout, stderr, err := d.runtimeService.ExecSync(r.GetContainerId(), r.GetCmd(), time.Duration(r.GetTimeout())*time.Second) stdout, stderr, err := d.runtimeService.ExecSync(r.GetContainerId(), r.GetCmd(), time.Duration(r.GetTimeout())*time.Second)
var exitCode int32 var exitCode int32
if err != nil { if err != nil {
@@ -156,61 +156,61 @@ func (d *dockerService) ExecSync(ctx context.Context, r *runtimeApi.ExecSyncRequ
} }
exitCode = int32(exitError.ExitStatus()) exitCode = int32(exitError.ExitStatus())
} }
return &runtimeApi.ExecSyncResponse{ return &runtimeapi.ExecSyncResponse{
Stdout: stdout, Stdout: stdout,
Stderr: stderr, Stderr: stderr,
ExitCode: &exitCode, ExitCode: &exitCode,
}, nil }, nil
} }
func (d *dockerService) Exec(ctx context.Context, r *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) { func (d *dockerService) Exec(ctx context.Context, r *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
return d.runtimeService.Exec(r) return d.runtimeService.Exec(r)
} }
func (d *dockerService) Attach(ctx context.Context, r *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) { func (d *dockerService) Attach(ctx context.Context, r *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
return d.runtimeService.Attach(r) return d.runtimeService.Attach(r)
} }
func (d *dockerService) PortForward(ctx context.Context, r *runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) { func (d *dockerService) PortForward(ctx context.Context, r *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
return d.runtimeService.PortForward(r) return d.runtimeService.PortForward(r)
} }
func (d *dockerService) UpdateRuntimeConfig(ctx context.Context, r *runtimeApi.UpdateRuntimeConfigRequest) (*runtimeApi.UpdateRuntimeConfigResponse, error) { func (d *dockerService) UpdateRuntimeConfig(ctx context.Context, r *runtimeapi.UpdateRuntimeConfigRequest) (*runtimeapi.UpdateRuntimeConfigResponse, error) {
err := d.runtimeService.UpdateRuntimeConfig(r.GetRuntimeConfig()) err := d.runtimeService.UpdateRuntimeConfig(r.GetRuntimeConfig())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.UpdateRuntimeConfigResponse{}, nil return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
} }
func (d *dockerService) ListImages(ctx context.Context, r *runtimeApi.ListImagesRequest) (*runtimeApi.ListImagesResponse, error) { func (d *dockerService) ListImages(ctx context.Context, r *runtimeapi.ListImagesRequest) (*runtimeapi.ListImagesResponse, error) {
images, err := d.imageService.ListImages(r.GetFilter()) images, err := d.imageService.ListImages(r.GetFilter())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.ListImagesResponse{Images: images}, nil return &runtimeapi.ListImagesResponse{Images: images}, nil
} }
func (d *dockerService) ImageStatus(ctx context.Context, r *runtimeApi.ImageStatusRequest) (*runtimeApi.ImageStatusResponse, error) { func (d *dockerService) ImageStatus(ctx context.Context, r *runtimeapi.ImageStatusRequest) (*runtimeapi.ImageStatusResponse, error) {
image, err := d.imageService.ImageStatus(r.GetImage()) image, err := d.imageService.ImageStatus(r.GetImage())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.ImageStatusResponse{Image: image}, nil return &runtimeapi.ImageStatusResponse{Image: image}, nil
} }
func (d *dockerService) PullImage(ctx context.Context, r *runtimeApi.PullImageRequest) (*runtimeApi.PullImageResponse, error) { func (d *dockerService) PullImage(ctx context.Context, r *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) {
err := d.imageService.PullImage(r.GetImage(), r.GetAuth()) err := d.imageService.PullImage(r.GetImage(), r.GetAuth())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.PullImageResponse{}, nil return &runtimeapi.PullImageResponse{}, nil
} }
func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeApi.RemoveImageRequest) (*runtimeApi.RemoveImageResponse, error) { func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
err := d.imageService.RemoveImage(r.GetImage()) err := d.imageService.RemoveImage(r.GetImage())
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &runtimeApi.RemoveImageResponse{}, nil return &runtimeapi.RemoveImageResponse{}, nil
} }

View File

@@ -41,7 +41,7 @@ import (
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
"k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/config"
@@ -256,7 +256,7 @@ func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps
return cfg, nil return cfg, nil
} }
func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (internalApi.RuntimeService, internalApi.ImageManagerService, error) { func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (internalapi.RuntimeService, internalapi.ImageManagerService, error) {
rs, err := remote.NewRemoteRuntimeService(config.RemoteRuntimeEndpoint, config.RuntimeRequestTimeout.Duration) rs, err := remote.NewRemoteRuntimeService(config.RemoteRuntimeEndpoint, config.RuntimeRequestTimeout.Duration)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
@@ -529,8 +529,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
// becomes the default. // becomes the default.
klet.networkPlugin = nil klet.networkPlugin = nil
var runtimeService internalApi.RuntimeService var runtimeService internalapi.RuntimeService
var imageService internalApi.ImageManagerService var imageService internalapi.ImageManagerService
var err error var err error
switch kubeCfg.ContainerRuntime { switch kubeCfg.ContainerRuntime {
@@ -548,8 +548,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
} }
klet.criHandler = ds klet.criHandler = ds
rs := ds.(internalApi.RuntimeService) rs := ds.(internalapi.RuntimeService)
is := ds.(internalApi.ImageManagerService) is := ds.(internalapi.ImageManagerService)
// This is an internal knob to switch between grpc and non-grpc // This is an internal knob to switch between grpc and non-grpc
// integration. // integration.
// TODO: Remove this knob once we switch to using GRPC completely. // TODO: Remove this knob once we switch to using GRPC completely.

View File

@@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"math" "math"
"net" "net"
goRuntime "runtime" goruntime "runtime"
"sort" "sort"
"strings" "strings"
"time" "time"
@@ -177,8 +177,8 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
Name: string(kl.nodeName), Name: string(kl.nodeName),
Labels: map[string]string{ Labels: map[string]string{
unversioned.LabelHostname: kl.hostname, unversioned.LabelHostname: kl.hostname,
unversioned.LabelOS: goRuntime.GOOS, unversioned.LabelOS: goruntime.GOOS,
unversioned.LabelArch: goRuntime.GOARCH, unversioned.LabelArch: goruntime.GOARCH,
}, },
}, },
Spec: v1.NodeSpec{ Spec: v1.NodeSpec{
@@ -572,8 +572,8 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
// Set the GOOS and GOARCH for this node // Set the GOOS and GOARCH for this node
func (kl *Kubelet) setNodeStatusGoRuntime(node *v1.Node) { func (kl *Kubelet) setNodeStatusGoRuntime(node *v1.Node) {
node.Status.NodeInfo.OperatingSystem = goRuntime.GOOS node.Status.NodeInfo.OperatingSystem = goruntime.GOOS
node.Status.NodeInfo.Architecture = goRuntime.GOARCH node.Status.NodeInfo.Architecture = goruntime.GOARCH
} }
// Set status for the node. // Set status for the node.

View File

@@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/credentialprovider"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/lifecycle"
@@ -91,7 +91,7 @@ func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
return pod, found return pod, found
} }
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) { func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
recorder := &record.FakeRecorder{} recorder := &record.FakeRecorder{}
kubeRuntimeManager := &kubeGenericRuntimeManager{ kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder, recorder: recorder,

View File

@@ -23,7 +23,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
) )
@@ -57,7 +57,7 @@ func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID } func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
// Newest first. // Newest first.
type podSandboxByCreated []*runtimeApi.PodSandbox type podSandboxByCreated []*runtimeapi.PodSandbox
func (p podSandboxByCreated) Len() int { return len(p) } func (p podSandboxByCreated) Len() int { return len(p) }
func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
@@ -69,37 +69,37 @@ func (c containerStatusByCreated) Len() int { return len(c) }
func (c containerStatusByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] } func (c containerStatusByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) } func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) }
// toKubeContainerState converts runtimeApi.ContainerState to kubecontainer.ContainerState. // toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.ContainerState.
func toKubeContainerState(state runtimeApi.ContainerState) kubecontainer.ContainerState { func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.ContainerState {
switch state { switch state {
case runtimeApi.ContainerState_CONTAINER_CREATED: case runtimeapi.ContainerState_CONTAINER_CREATED:
return kubecontainer.ContainerStateCreated return kubecontainer.ContainerStateCreated
case runtimeApi.ContainerState_CONTAINER_RUNNING: case runtimeapi.ContainerState_CONTAINER_RUNNING:
return kubecontainer.ContainerStateRunning return kubecontainer.ContainerStateRunning
case runtimeApi.ContainerState_CONTAINER_EXITED: case runtimeapi.ContainerState_CONTAINER_EXITED:
return kubecontainer.ContainerStateExited return kubecontainer.ContainerStateExited
case runtimeApi.ContainerState_CONTAINER_UNKNOWN: case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
return kubecontainer.ContainerStateUnknown return kubecontainer.ContainerStateUnknown
} }
return kubecontainer.ContainerStateUnknown return kubecontainer.ContainerStateUnknown
} }
// toRuntimeProtocol converts v1.Protocol to runtimeApi.Protocol. // toRuntimeProtocol converts v1.Protocol to runtimeapi.Protocol.
func toRuntimeProtocol(protocol v1.Protocol) runtimeApi.Protocol { func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {
switch protocol { switch protocol {
case v1.ProtocolTCP: case v1.ProtocolTCP:
return runtimeApi.Protocol_TCP return runtimeapi.Protocol_TCP
case v1.ProtocolUDP: case v1.ProtocolUDP:
return runtimeApi.Protocol_UDP return runtimeapi.Protocol_UDP
} }
glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
return runtimeApi.Protocol_TCP return runtimeapi.Protocol_TCP
} }
// toKubeContainer converts runtimeApi.Container to kubecontainer.Container. // toKubeContainer converts runtimeapi.Container to kubecontainer.Container.
func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeApi.Container) (*kubecontainer.Container, error) { func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*kubecontainer.Container, error) {
if c == nil || c.Id == nil || c.Image == nil || c.State == nil { if c == nil || c.Id == nil || c.Image == nil || c.State == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container") return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
} }
@@ -115,11 +115,11 @@ func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeApi.Container) (*k
}, nil }, nil
} }
// sandboxToKubeContainer converts runtimeApi.PodSandbox to kubecontainer.Container. // sandboxToKubeContainer converts runtimeapi.PodSandbox to kubecontainer.Container.
// This is only needed because we need to return sandboxes as if they were // This is only needed because we need to return sandboxes as if they were
// kubecontainer.Containers to avoid substantial changes to PLEG. // kubecontainer.Containers to avoid substantial changes to PLEG.
// TODO: Remove this once it becomes obsolete. // TODO: Remove this once it becomes obsolete.
func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeApi.PodSandbox) (*kubecontainer.Container, error) { func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSandbox) (*kubecontainer.Container, error) {
if s == nil || s.Id == nil || s.State == nil { if s == nil || s.Id == nil || s.State == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container") return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
} }
@@ -149,7 +149,7 @@ func getContainerSpec(pod *v1.Pod, containerName string) *v1.Container {
// getImageUser gets uid or user name that will run the command(s) from image. The function // getImageUser gets uid or user name that will run the command(s) from image. The function
// guarantees that only one of them is set. // guarantees that only one of them is set.
func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, *string, error) { func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, *string, error) {
imageStatus, err := m.imageService.ImageStatus(&runtimeApi.ImageSpec{Image: &image}) imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image})
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@@ -237,8 +237,8 @@ func buildPodLogsDirectory(podUID types.UID) string {
return filepath.Join(podLogsRootDirectory, string(podUID)) return filepath.Join(podLogsRootDirectory, string(podUID))
} }
// toKubeRuntimeStatus converts the runtimeApi.RuntimeStatus to kubecontainer.RuntimeStatus. // toKubeRuntimeStatus converts the runtimeapi.RuntimeStatus to kubecontainer.RuntimeStatus.
func toKubeRuntimeStatus(status *runtimeApi.RuntimeStatus) *kubecontainer.RuntimeStatus { func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.RuntimeStatus {
conditions := []kubecontainer.RuntimeCondition{} conditions := []kubecontainer.RuntimeCondition{}
for _, c := range status.GetConditions() { for _, c := range status.GetConditions() {
conditions = append(conditions, kubecontainer.RuntimeCondition{ conditions = append(conditions, kubecontainer.RuntimeCondition{

View File

@@ -19,30 +19,30 @@ package kuberuntime
import ( import (
"time" "time"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/metrics"
) )
// instrumentedRuntimeService wraps the RuntimeService and records the operations // instrumentedRuntimeService wraps the RuntimeService and records the operations
// and errors metrics. // and errors metrics.
type instrumentedRuntimeService struct { type instrumentedRuntimeService struct {
service internalApi.RuntimeService service internalapi.RuntimeService
} }
// Creates an instrumented RuntimeInterface from an existing RuntimeService. // Creates an instrumented RuntimeInterface from an existing RuntimeService.
func NewInstrumentedRuntimeService(service internalApi.RuntimeService) internalApi.RuntimeService { func NewInstrumentedRuntimeService(service internalapi.RuntimeService) internalapi.RuntimeService {
return &instrumentedRuntimeService{service: service} return &instrumentedRuntimeService{service: service}
} }
// instrumentedImageManagerService wraps the ImageManagerService and records the operations // instrumentedImageManagerService wraps the ImageManagerService and records the operations
// and errors metrics. // and errors metrics.
type instrumentedImageManagerService struct { type instrumentedImageManagerService struct {
service internalApi.ImageManagerService service internalapi.ImageManagerService
} }
// Creates an instrumented ImageManagerService from an existing ImageManagerService. // Creates an instrumented ImageManagerService from an existing ImageManagerService.
func NewInstrumentedImageManagerService(service internalApi.ImageManagerService) internalApi.ImageManagerService { func NewInstrumentedImageManagerService(service internalapi.ImageManagerService) internalapi.ImageManagerService {
return &instrumentedImageManagerService{service: service} return &instrumentedImageManagerService{service: service}
} }
@@ -59,7 +59,7 @@ func recordError(operation string, err error) {
} }
} }
func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResponse, error) { func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
const operation = "version" const operation = "version"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -68,7 +68,7 @@ func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeApi.Ver
return out, err return out, err
} }
func (in instrumentedRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) { func (in instrumentedRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
const operation = "status" const operation = "status"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -77,7 +77,7 @@ func (in instrumentedRuntimeService) Status() (*runtimeApi.RuntimeStatus, error)
return out, err return out, err
} }
func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) { func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "create_container" const operation = "create_container"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -113,7 +113,7 @@ func (in instrumentedRuntimeService) RemoveContainer(containerID string) error {
return err return err
} }
func (in instrumentedRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) { func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
const operation = "list_containers" const operation = "list_containers"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -122,7 +122,7 @@ func (in instrumentedRuntimeService) ListContainers(filter *runtimeApi.Container
return out, err return out, err
} }
func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) { func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
const operation = "container_status" const operation = "container_status"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -140,7 +140,7 @@ func (in instrumentedRuntimeService) ExecSync(containerID string, cmd []string,
return stdout, stderr, err return stdout, stderr, err
} }
func (in instrumentedRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) { func (in instrumentedRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
const operation = "exec" const operation = "exec"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -149,7 +149,7 @@ func (in instrumentedRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtime
return resp, err return resp, err
} }
func (in instrumentedRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) { func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
const operation = "attach" const operation = "attach"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -158,7 +158,7 @@ func (in instrumentedRuntimeService) Attach(req *runtimeApi.AttachRequest) (*run
return resp, err return resp, err
} }
func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) { func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
const operation = "run_podsandbox" const operation = "run_podsandbox"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -185,7 +185,7 @@ func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error
return err return err
} }
func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeApi.PodSandboxStatus, error) { func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
const operation = "podsandbox_status" const operation = "podsandbox_status"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -194,7 +194,7 @@ func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*run
return out, err return out, err
} }
func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) { func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
const operation = "list_podsandbox" const operation = "list_podsandbox"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -203,7 +203,7 @@ func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandbo
return out, err return out, err
} }
func (in instrumentedRuntimeService) PortForward(req *runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) { func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
const operation = "port_forward" const operation = "port_forward"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -212,7 +212,7 @@ func (in instrumentedRuntimeService) PortForward(req *runtimeApi.PortForwardRequ
return resp, err return resp, err
} }
func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) error { func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
const operation = "update_runtime_config" const operation = "update_runtime_config"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -221,7 +221,7 @@ func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeA
return err return err
} }
func (in instrumentedImageManagerService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) { func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
const operation = "list_images" const operation = "list_images"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -230,7 +230,7 @@ func (in instrumentedImageManagerService) ListImages(filter *runtimeApi.ImageFil
return out, err return out, err
} }
func (in instrumentedImageManagerService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) { func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
const operation = "image_status" const operation = "image_status"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -239,7 +239,7 @@ func (in instrumentedImageManagerService) ImageStatus(image *runtimeApi.ImageSpe
return out, err return out, err
} }
func (in instrumentedImageManagerService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error { func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
const operation = "pull_image" const operation = "pull_image"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())
@@ -248,7 +248,7 @@ func (in instrumentedImageManagerService) PullImage(image *runtimeApi.ImageSpec,
return err return err
} }
func (in instrumentedImageManagerService) RemoveImage(image *runtimeApi.ImageSpec) error { func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpec) error {
const operation = "remove_image" const operation = "remove_image"
defer recordOperation(operation, time.Now()) defer recordOperation(operation, time.Now())

View File

@@ -31,7 +31,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/qos"
@@ -49,7 +49,7 @@ import (
// * create the container // * create the container
// * start the container // * start the container
// * run the post start lifecycle hooks (if applicable) // * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeApi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) { func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) {
// Step 1: pull the image. // Step 1: pull the image.
err, msg := m.imagePuller.EnsureImageExists(pod, container, pullSecrets) err, msg := m.imagePuller.EnsureImageExists(pod, container, pullSecrets)
if err != nil { if err != nil {
@@ -129,7 +129,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
} }
// generateContainerConfig generates container config for kubelet runtime v1. // generateContainerConfig generates container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP string) (*runtimeApi.ContainerConfig, error) { func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP string) (*runtimeapi.ContainerConfig, error) {
opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -151,12 +151,12 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs) command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
containerLogsPath := buildContainerLogsPath(container.Name, restartCount) containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
restartCountUint32 := uint32(restartCount) restartCountUint32 := uint32(restartCount)
config := &runtimeApi.ContainerConfig{ config := &runtimeapi.ContainerConfig{
Metadata: &runtimeApi.ContainerMetadata{ Metadata: &runtimeapi.ContainerMetadata{
Name: &container.Name, Name: &container.Name,
Attempt: &restartCountUint32, Attempt: &restartCountUint32,
}, },
Image: &runtimeApi.ImageSpec{Image: &container.Image}, Image: &runtimeapi.ImageSpec{Image: &container.Image},
Command: command, Command: command,
Args: args, Args: args,
WorkingDir: &container.WorkingDir, WorkingDir: &container.WorkingDir,
@@ -172,10 +172,10 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
} }
// set environment variables // set environment variables
envs := make([]*runtimeApi.KeyValue, len(opts.Envs)) envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
for idx := range opts.Envs { for idx := range opts.Envs {
e := opts.Envs[idx] e := opts.Envs[idx]
envs[idx] = &runtimeApi.KeyValue{ envs[idx] = &runtimeapi.KeyValue{
Key: &e.Name, Key: &e.Name,
Value: &e.Value, Value: &e.Value,
} }
@@ -186,9 +186,9 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
} }
// generateLinuxContainerConfig generates linux container config for kubelet runtime v1. // generateLinuxContainerConfig generates linux container config for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeApi.LinuxContainerConfig { func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeapi.LinuxContainerConfig {
lc := &runtimeApi.LinuxContainerConfig{ lc := &runtimeapi.LinuxContainerConfig{
Resources: &runtimeApi.LinuxContainerResources{}, Resources: &runtimeapi.LinuxContainerResources{},
SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username), SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
} }
@@ -229,12 +229,12 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
} }
// makeDevices generates container devices for kubelet runtime v1. // makeDevices generates container devices for kubelet runtime v1.
func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device { func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.Device {
devices := make([]*runtimeApi.Device, len(opts.Devices)) devices := make([]*runtimeapi.Device, len(opts.Devices))
for idx := range opts.Devices { for idx := range opts.Devices {
device := opts.Devices[idx] device := opts.Devices[idx]
devices[idx] = &runtimeApi.Device{ devices[idx] = &runtimeapi.Device{
HostPath: &device.PathOnHost, HostPath: &device.PathOnHost,
ContainerPath: &device.PathInContainer, ContainerPath: &device.PathInContainer,
Permissions: &device.Permissions, Permissions: &device.Permissions,
@@ -245,13 +245,13 @@ func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device {
} }
// makeMounts generates container volume mounts for kubelet runtime v1. // makeMounts generates container volume mounts for kubelet runtime v1.
func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeApi.Mount { func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeapi.Mount {
volumeMounts := []*runtimeApi.Mount{} volumeMounts := []*runtimeapi.Mount{}
for idx := range opts.Mounts { for idx := range opts.Mounts {
v := opts.Mounts[idx] v := opts.Mounts[idx]
selinuxRelabel := v.SELinuxRelabel && selinux.SELinuxEnabled() selinuxRelabel := v.SELinuxRelabel && selinux.SELinuxEnabled()
mount := &runtimeApi.Mount{ mount := &runtimeapi.Mount{
HostPath: &v.HostPath, HostPath: &v.HostPath,
ContainerPath: &v.ContainerPath, ContainerPath: &v.ContainerPath,
Readonly: &v.ReadOnly, Readonly: &v.ReadOnly,
@@ -276,7 +276,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
} else { } else {
fs.Close() fs.Close()
selinuxRelabel := selinux.SELinuxEnabled() selinuxRelabel := selinux.SELinuxEnabled()
volumeMounts = append(volumeMounts, &runtimeApi.Mount{ volumeMounts = append(volumeMounts, &runtimeapi.Mount{
HostPath: &containerLogPath, HostPath: &containerLogPath,
ContainerPath: &container.TerminationMessagePath, ContainerPath: &container.TerminationMessagePath,
SelinuxRelabel: &selinuxRelabel, SelinuxRelabel: &selinuxRelabel,
@@ -290,12 +290,12 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO
// getKubeletContainers lists containers managed by kubelet. // getKubeletContainers lists containers managed by kubelet.
// The boolean parameter specifies whether returns all containers including // The boolean parameter specifies whether returns all containers including
// those already exited and dead containers (used for garbage collection). // those already exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeApi.Container, error) { func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeapi.Container, error) {
filter := &runtimeApi.ContainerFilter{ filter := &runtimeapi.ContainerFilter{
LabelSelector: map[string]string{kubernetesManagedLabel: "true"}, LabelSelector: map[string]string{kubernetesManagedLabel: "true"},
} }
if !allContainers { if !allContainers {
runningState := runtimeApi.ContainerState_CONTAINER_RUNNING runningState := runtimeapi.ContainerState_CONTAINER_RUNNING
filter.State = &runningState filter.State = &runningState
} }
@@ -309,7 +309,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]
} }
// getContainers lists containers by filter. // getContainers lists containers by filter.
func (m *kubeGenericRuntimeManager) getContainersHelper(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) { func (m *kubeGenericRuntimeManager) getContainersHelper(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
resp, err := m.runtimeService.ListContainers(filter) resp, err := m.runtimeService.ListContainers(filter)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -324,7 +324,7 @@ func makeUID() string {
} }
// getTerminationMessage gets termination message of the container. // getTerminationMessage gets termination message of the container.
func getTerminationMessage(status *runtimeApi.ContainerStatus, kubeStatus *kubecontainer.ContainerStatus, terminationMessagePath string) string { func getTerminationMessage(status *runtimeapi.ContainerStatus, kubeStatus *kubecontainer.ContainerStatus, terminationMessagePath string) string {
message := "" message := ""
if !kubeStatus.FinishedAt.IsZero() || kubeStatus.ExitCode != 0 { if !kubeStatus.FinishedAt.IsZero() || kubeStatus.ExitCode != 0 {
@@ -351,7 +351,7 @@ func getTerminationMessage(status *runtimeApi.ContainerStatus, kubeStatus *kubec
// getPodContainerStatuses gets all containers' statuses for the pod. // getPodContainerStatuses gets all containers' statuses for the pod.
func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.ContainerStatus, error) { func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.ContainerStatus, error) {
// Select all containers of the given pod. // Select all containers of the given pod.
containers, err := m.runtimeService.ListContainers(&runtimeApi.ContainerFilter{ containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)}, LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)},
}) })
if err != nil { if err != nil {
@@ -384,7 +384,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
CreatedAt: time.Unix(0, status.GetCreatedAt()), CreatedAt: time.Unix(0, status.GetCreatedAt()),
} }
if c.GetState() == runtimeApi.ContainerState_CONTAINER_RUNNING { if c.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
cStatus.StartedAt = time.Unix(0, status.GetStartedAt()) cStatus.StartedAt = time.Unix(0, status.GetStartedAt())
} else { } else {
cStatus.Reason = status.GetReason() cStatus.Reason = status.GetReason()
@@ -669,7 +669,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku
// GetExec gets the endpoint the runtime will serve the exec request from. // GetExec gets the endpoint the runtime will serve the exec request from.
func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) {
req := &runtimeApi.ExecRequest{ req := &runtimeapi.ExecRequest{
ContainerId: &id.ID, ContainerId: &id.ID,
Cmd: cmd, Cmd: cmd,
Tty: &tty, Tty: &tty,
@@ -685,7 +685,7 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []
// GetAttach gets the endpoint the runtime will serve the attach request from. // GetAttach gets the endpoint the runtime will serve the attach request from.
func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr bool) (*url.URL, error) { func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr bool) (*url.URL, error) {
req := &runtimeApi.AttachRequest{ req := &runtimeapi.AttachRequest{
ContainerId: &id.ID, ContainerId: &id.ID,
Stdin: &stdin, Stdin: &stdin,
} }

View File

@@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
) )
@@ -60,7 +60,7 @@ func TestRemoveContainer(t *testing.T) {
assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink}) assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
// Verify container is removed // Verify container is removed
fakeRuntime.AssertCalls([]string{"RemoveContainer"}) fakeRuntime.AssertCalls([]string{"RemoveContainer"})
containers, err := fakeRuntime.ListContainers(&runtimeApi.ContainerFilter{Id: &containerId}) containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: &containerId})
assert.NoError(t, err) assert.NoError(t, err)
assert.Empty(t, containers) assert.Empty(t, containers)
} }

View File

@@ -24,8 +24,8 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
) )
@@ -46,13 +46,13 @@ const sandboxMinGCAge time.Duration = 30 * time.Second
// containerGC is the manager of garbage collection. // containerGC is the manager of garbage collection.
type containerGC struct { type containerGC struct {
client internalApi.RuntimeService client internalapi.RuntimeService
manager *kubeGenericRuntimeManager manager *kubeGenericRuntimeManager
podGetter podGetter podGetter podGetter
} }
// NewContainerGC creates a new containerGC. // NewContainerGC creates a new containerGC.
func NewContainerGC(client internalApi.RuntimeService, podGetter podGetter, manager *kubeGenericRuntimeManager) *containerGC { func NewContainerGC(client internalapi.RuntimeService, podGetter podGetter, manager *kubeGenericRuntimeManager) *containerGC {
return &containerGC{ return &containerGC{
client: client, client: client,
manager: manager, manager: manager,
@@ -161,7 +161,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE
newestGCTime := time.Now().Add(-minAge) newestGCTime := time.Now().Add(-minAge)
for _, container := range containers { for _, container := range containers {
// Prune out running containers. // Prune out running containers.
if container.GetState() == runtimeApi.ContainerState_CONTAINER_RUNNING { if container.GetState() == runtimeapi.ContainerState_CONTAINER_RUNNING {
continue continue
} }
@@ -256,7 +256,7 @@ func (cgc *containerGC) evictSandboxes(minAge time.Duration) error {
newestGCTime := time.Now().Add(-minAge) newestGCTime := time.Now().Add(-minAge)
for _, sandbox := range sandboxes { for _, sandbox := range sandboxes {
// Prune out ready sandboxes. // Prune out ready sandboxes.
if sandbox.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY { if sandbox.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
continue continue
} }

View File

@@ -25,7 +25,7 @@ import (
"github.com/golang/mock/gomock" "github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
) )
@@ -54,7 +54,7 @@ func TestSandboxGC(t *testing.T) {
{ {
description: "sandbox with no containers should be garbage collected.", description: "sandbox with no containers should be garbage collected.",
sandboxes: []sandboxTemplate{ sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY}, {pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
}, },
containers: []containerTemplate{}, containers: []containerTemplate{},
remain: []int{}, remain: []int{},
@@ -62,7 +62,7 @@ func TestSandboxGC(t *testing.T) {
{ {
description: "running sandbox should not be garbage collected.", description: "running sandbox should not be garbage collected.",
sandboxes: []sandboxTemplate{ sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeApi.PodSandboxState_SANDBOX_READY}, {pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_READY},
}, },
containers: []containerTemplate{}, containers: []containerTemplate{},
remain: []int{0}, remain: []int{0},
@@ -70,18 +70,18 @@ func TestSandboxGC(t *testing.T) {
{ {
description: "sandbox with containers should not be garbage collected.", description: "sandbox with containers should not be garbage collected.",
sandboxes: []sandboxTemplate{ sandboxes: []sandboxTemplate{
{pod: pods[0], state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY}, {pod: pods[0], state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
}, },
containers: []containerTemplate{ containers: []containerTemplate{
{pod: pods[0], container: &pods[0].Spec.Containers[0], state: runtimeApi.ContainerState_CONTAINER_EXITED}, {pod: pods[0], container: &pods[0].Spec.Containers[0], state: runtimeapi.ContainerState_CONTAINER_EXITED},
}, },
remain: []int{0}, remain: []int{0},
}, },
{ {
description: "sandbox within min age should not be garbage collected.", description: "sandbox within min age should not be garbage collected.",
sandboxes: []sandboxTemplate{ sandboxes: []sandboxTemplate{
{pod: pods[0], createdAt: time.Now().UnixNano(), state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY}, {pod: pods[0], createdAt: time.Now().UnixNano(), state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
{pod: pods[1], createdAt: time.Now().Add(-2 * time.Hour).UnixNano(), state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY}, {pod: pods[1], createdAt: time.Now().Add(-2 * time.Hour).UnixNano(), state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
}, },
containers: []containerTemplate{}, containers: []containerTemplate{},
minAge: time.Hour, // assume the test won't take an hour minAge: time.Hour, // assume the test won't take an hour
@@ -91,14 +91,14 @@ func TestSandboxGC(t *testing.T) {
description: "multiple sandboxes should be handled properly.", description: "multiple sandboxes should be handled properly.",
sandboxes: []sandboxTemplate{ sandboxes: []sandboxTemplate{
// running sandbox. // running sandbox.
{pod: pods[0], attempt: 1, state: runtimeApi.PodSandboxState_SANDBOX_READY}, {pod: pods[0], attempt: 1, state: runtimeapi.PodSandboxState_SANDBOX_READY},
// exited sandbox with containers. // exited sandbox with containers.
{pod: pods[1], attempt: 1, state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY}, {pod: pods[1], attempt: 1, state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
// exited sandbox without containers. // exited sandbox without containers.
{pod: pods[1], attempt: 0, state: runtimeApi.PodSandboxState_SANDBOX_NOTREADY}, {pod: pods[1], attempt: 0, state: runtimeapi.PodSandboxState_SANDBOX_NOTREADY},
}, },
containers: []containerTemplate{ containers: []containerTemplate{
{pod: pods[1], container: &pods[1].Spec.Containers[0], sandboxAttempt: 1, state: runtimeApi.ContainerState_CONTAINER_EXITED}, {pod: pods[1], container: &pods[1].Spec.Containers[0], sandboxAttempt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
}, },
remain: []int{0, 1}, remain: []int{0, 1},
}, },
@@ -127,7 +127,7 @@ func TestContainerGC(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
fakePodGetter := m.containerGC.podGetter.(*fakePodGetter) fakePodGetter := m.containerGC.podGetter.(*fakePodGetter)
makeGCContainer := func(podName, containerName string, attempt int, createdAt int64, state runtimeApi.ContainerState) containerTemplate { makeGCContainer := func(podName, containerName string, attempt int, createdAt int64, state runtimeapi.ContainerState) containerTemplate {
container := makeTestContainer(containerName, "test-image") container := makeTestContainer(containerName, "test-image")
pod := makeTestPod(podName, "test-ns", podName, []v1.Container{container}) pod := makeTestPod(podName, "test-ns", podName, []v1.Container{container})
if podName != "deleted" { if podName != "deleted" {
@@ -153,7 +153,7 @@ func TestContainerGC(t *testing.T) {
{ {
description: "all containers should be removed when max container limit is 0", description: "all containers should be removed when max container limit is 0",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0}, policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
remain: []int{}, remain: []int{},
@@ -161,11 +161,11 @@ func TestContainerGC(t *testing.T) {
{ {
description: "max containers should be complied when no max per pod container limit is set", description: "max containers should be complied when no max per pod container limit is set",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 4, 4, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 4, 4, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 3, 3, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 3, 3, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4}, policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
remain: []int{0, 1, 2, 3}, remain: []int{0, 1, 2, 3},
@@ -173,9 +173,9 @@ func TestContainerGC(t *testing.T) {
{ {
description: "no containers should be removed if both max container and per pod container limits are not set", description: "no containers should be removed if both max container and per pod container limits are not set",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1}, policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
remain: []int{0, 1, 2}, remain: []int{0, 1, 2},
@@ -183,94 +183,94 @@ func TestContainerGC(t *testing.T) {
{ {
description: "recently started containers should not be removed", description: "recently started containers should not be removed",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
remain: []int{0, 1, 2}, remain: []int{0, 1, 2},
}, },
{ {
description: "oldest containers should be removed when per pod container limit exceeded", description: "oldest containers should be removed when per pod container limit exceeded",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
remain: []int{0, 1}, remain: []int{0, 1},
}, },
{ {
description: "running containers should not be removed", description: "running containers should not be removed",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_RUNNING), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_RUNNING),
}, },
remain: []int{0, 1, 2}, remain: []int{0, 1, 2},
}, },
{ {
description: "no containers should be removed when limits are not exceeded", description: "no containers should be removed when limits are not exceeded",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
remain: []int{0, 1}, remain: []int{0, 1},
}, },
{ {
description: "max container count should apply per (UID, container) pair", description: "max container count should apply per (UID, container) pair",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo1", "baz", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo1", "baz", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "baz", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo1", "baz", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo2", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo2", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo2", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
remain: []int{0, 1, 3, 4, 6, 7}, remain: []int{0, 1, 3, 4, 6, 7},
}, },
{ {
description: "max limit should apply and try to keep from every pod", description: "max limit should apply and try to keep from every pod",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo1", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo2", "bar2", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo3", "bar3", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo4", "bar4", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
remain: []int{0, 2, 4, 6, 8}, remain: []int{0, 2, 4, 6, 8},
}, },
{ {
description: "oldest pods should be removed if limit exceeded", description: "oldest pods should be removed if limit exceeded",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo1", "bar1", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo1", "bar1", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo2", "bar2", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo3", "bar3", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo4", "bar4", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo5", "bar5", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo5", "bar5", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo6", "bar6", 2, 2, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo6", "bar6", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo7", "bar7", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo7", "bar7", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
remain: []int{0, 2, 4, 6, 8, 9}, remain: []int{0, 2, 4, 6, 8, 9},
}, },
{ {
description: "containers for deleted pods should be removed", description: "containers for deleted pods should be removed",
containers: []containerTemplate{ containers: []containerTemplate{
makeGCContainer("foo", "bar", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("foo", "bar", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
// deleted pods still respect MinAge. // deleted pods still respect MinAge.
makeGCContainer("deleted", "bar1", 2, time.Now().UnixNano(), runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("deleted", "bar1", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 1, 1, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("deleted", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED),
makeGCContainer("deleted", "bar1", 0, 0, runtimeApi.ContainerState_CONTAINER_EXITED), makeGCContainer("deleted", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
}, },
remain: []int{0, 1, 2}, remain: []int{0, 1, 2},
}, },

View File

@@ -20,7 +20,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/credentialprovider"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
utilerrors "k8s.io/kubernetes/pkg/util/errors" utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/parsers" "k8s.io/kubernetes/pkg/util/parsers"
@@ -40,7 +40,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
return err return err
} }
imgSpec := &runtimeApi.ImageSpec{Image: &img} imgSpec := &runtimeapi.ImageSpec{Image: &img}
creds, withCredentials := keyring.Lookup(repoToPull) creds, withCredentials := keyring.Lookup(repoToPull)
if !withCredentials { if !withCredentials {
glog.V(3).Infof("Pulling image %q without credentials", img) glog.V(3).Infof("Pulling image %q without credentials", img)
@@ -57,7 +57,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
var pullErrs []error var pullErrs []error
for _, currentCreds := range creds { for _, currentCreds := range creds {
authConfig := credentialprovider.LazyProvide(currentCreds) authConfig := credentialprovider.LazyProvide(currentCreds)
auth := &runtimeApi.AuthConfig{ auth := &runtimeapi.AuthConfig{
Username: &authConfig.Username, Username: &authConfig.Username,
Password: &authConfig.Password, Password: &authConfig.Password,
Auth: &authConfig.Auth, Auth: &authConfig.Auth,
@@ -80,7 +80,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
// IsImagePresent checks whether the container image is already in the local storage. // IsImagePresent checks whether the container image is already in the local storage.
func (m *kubeGenericRuntimeManager) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) { func (m *kubeGenericRuntimeManager) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) {
status, err := m.imageService.ImageStatus(&runtimeApi.ImageSpec{Image: &image.Image}) status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: &image.Image})
if err != nil { if err != nil {
glog.Errorf("ImageStatus for image %q failed: %v", image, err) glog.Errorf("ImageStatus for image %q failed: %v", image, err)
return false, err return false, err
@@ -112,7 +112,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)
// RemoveImage removes the specified image. // RemoveImage removes the specified image.
func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error { func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error {
err := m.imageService.RemoveImage(&runtimeApi.ImageSpec{Image: &image.Image}) err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: &image.Image})
if err != nil { if err != nil {
glog.Errorf("Remove image %q failed: %v", image.Image, err) glog.Errorf("Remove image %q failed: %v", image.Image, err)
return err return err

View File

@@ -29,8 +29,8 @@ import (
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/credentialprovider"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/pkg/kubelet/images"
@@ -101,8 +101,8 @@ type kubeGenericRuntimeManager struct {
imagePuller images.ImageManager imagePuller images.ImageManager
// gRPC service clients // gRPC service clients
runtimeService internalApi.RuntimeService runtimeService internalapi.RuntimeService
imageService internalApi.ImageManagerService imageService internalapi.ImageManagerService
// The version cache of runtime daemon. // The version cache of runtime daemon.
versionCache *cache.ObjectCache versionCache *cache.ObjectCache
@@ -130,8 +130,8 @@ func NewKubeGenericRuntimeManager(
imagePullQPS float32, imagePullQPS float32,
imagePullBurst int, imagePullBurst int,
cpuCFSQuota bool, cpuCFSQuota bool,
runtimeService internalApi.RuntimeService, runtimeService internalapi.RuntimeService,
imageService internalApi.ImageManagerService, imageService internalapi.ImageManagerService,
) (KubeGenericRuntime, error) { ) (KubeGenericRuntime, error) {
kubeRuntimeManager := &kubeGenericRuntimeManager{ kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder, recorder: recorder,
@@ -231,7 +231,7 @@ func (r runtimeVersion) Compare(other string) (int, error) {
return 0, nil return 0, nil
} }
func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeApi.VersionResponse, error) { func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) {
typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
if err != nil { if err != nil {
glog.Errorf("Get remote runtime typed version failed: %v", err) glog.Errorf("Get remote runtime typed version failed: %v", err)
@@ -259,7 +259,7 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error)
if err != nil { if err != nil {
return nil, err return nil, err
} }
typedVersion := versionObject.(*runtimeApi.VersionResponse) typedVersion := versionObject.(*runtimeapi.VersionResponse)
return newRuntimeVersion(typedVersion.GetRuntimeApiVersion()) return newRuntimeVersion(typedVersion.GetRuntimeApiVersion())
} }
@@ -396,14 +396,14 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku
readySandboxCount := 0 readySandboxCount := 0
for _, s := range podStatus.SandboxStatuses { for _, s := range podStatus.SandboxStatuses {
if s.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY { if s.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
readySandboxCount++ readySandboxCount++
} }
} }
// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one. // Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
sandboxStatus := podStatus.SandboxStatuses[0] sandboxStatus := podStatus.SandboxStatuses[0]
if readySandboxCount > 1 || sandboxStatus.GetState() != runtimeApi.PodSandboxState_SANDBOX_READY { if readySandboxCount > 1 || sandboxStatus.GetState() != runtimeapi.PodSandboxState_SANDBOX_READY {
glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
return true, sandboxStatus.Metadata.GetAttempt() + 1, sandboxStatus.GetId() return true, sandboxStatus.Metadata.GetAttempt() + 1, sandboxStatus.GetId()
} }
@@ -857,7 +857,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
}) })
glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName) glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)
sandboxStatuses := make([]*runtimeApi.PodSandboxStatus, len(podSandboxIDs)) sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs))
podIP := "" podIP := ""
for idx, podSandboxID := range podSandboxIDs { for idx, podSandboxID := range podSandboxIDs {
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
@@ -868,7 +868,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
sandboxStatuses[idx] = podSandboxStatus sandboxStatuses[idx] = podSandboxStatus
// Only get pod IP from latest sandbox // Only get pod IP from latest sandbox
if idx == 0 && podSandboxStatus.GetState() == runtimeApi.PodSandboxState_SANDBOX_READY { if idx == 0 && podSandboxStatus.GetState() == runtimeapi.PodSandboxState_SANDBOX_READY {
podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus) podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus)
} }
} }
@@ -922,8 +922,8 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
// field of the config? // field of the config?
glog.Infof("updating runtime config through cri with podcidr %v", podCIDR) glog.Infof("updating runtime config through cri with podcidr %v", podCIDR)
return m.runtimeService.UpdateRuntimeConfig( return m.runtimeService.UpdateRuntimeConfig(
&runtimeApi.RuntimeConfig{ &runtimeapi.RuntimeConfig{
NetworkConfig: &runtimeApi.NetworkConfig{ NetworkConfig: &runtimeapi.NetworkConfig{
PodCidr: &podCIDR, PodCidr: &podCIDR,
}, },
}) })

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/apis/componentconfig"
apitest "k8s.io/kubernetes/pkg/kubelet/api/testing" apitest "k8s.io/kubernetes/pkg/kubelet/api/testing"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/network" "k8s.io/kubernetes/pkg/kubelet/network"
@@ -66,7 +66,7 @@ type sandboxTemplate struct {
pod *v1.Pod pod *v1.Pod
attempt uint32 attempt uint32
createdAt int64 createdAt int64
state runtimeApi.PodSandboxState state runtimeapi.PodSandboxState
} }
// containerTemplate is a container template to create fake container. // containerTemplate is a container template to create fake container.
@@ -76,7 +76,7 @@ type containerTemplate struct {
sandboxAttempt uint32 sandboxAttempt uint32
attempt int attempt int
createdAt int64 createdAt int64
state runtimeApi.ContainerState state runtimeapi.ContainerState
} }
// makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and // makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and
@@ -86,7 +86,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *
sandbox := makeFakePodSandbox(t, m, sandboxTemplate{ sandbox := makeFakePodSandbox(t, m, sandboxTemplate{
pod: pod, pod: pod,
createdAt: fakeCreatedAt, createdAt: fakeCreatedAt,
state: runtimeApi.PodSandboxState_SANDBOX_READY, state: runtimeapi.PodSandboxState_SANDBOX_READY,
}) })
var containers []*apitest.FakeContainer var containers []*apitest.FakeContainer
@@ -95,7 +95,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *
pod: pod, pod: pod,
container: c, container: c,
createdAt: fakeCreatedAt, createdAt: fakeCreatedAt,
state: runtimeApi.ContainerState_CONTAINER_RUNNING, state: runtimeapi.ContainerState_CONTAINER_RUNNING,
} }
} }
for i := range pod.Spec.Containers { for i := range pod.Spec.Containers {
@@ -117,12 +117,12 @@ func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template san
podSandboxID := apitest.BuildSandboxName(config.Metadata) podSandboxID := apitest.BuildSandboxName(config.Metadata)
return &apitest.FakePodSandbox{ return &apitest.FakePodSandbox{
PodSandboxStatus: runtimeApi.PodSandboxStatus{ PodSandboxStatus: runtimeapi.PodSandboxStatus{
Id: &podSandboxID, Id: &podSandboxID,
Metadata: config.Metadata, Metadata: config.Metadata,
State: &template.state, State: &template.state,
CreatedAt: &template.createdAt, CreatedAt: &template.createdAt,
Network: &runtimeApi.PodSandboxNetworkStatus{ Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: &apitest.FakePodSandboxIP, Ip: &apitest.FakePodSandboxIP,
}, },
Labels: config.Labels, Labels: config.Labels,
@@ -152,7 +152,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID) containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID)
imageRef := containerConfig.Image.GetImage() imageRef := containerConfig.Image.GetImage()
return &apitest.FakeContainer{ return &apitest.FakeContainer{
ContainerStatus: runtimeApi.ContainerStatus{ ContainerStatus: runtimeapi.ContainerStatus{
Id: &containerID, Id: &containerID,
Metadata: containerConfig.Metadata, Metadata: containerConfig.Metadata,
Image: containerConfig.Image, Image: containerConfig.Image,
@@ -321,7 +321,7 @@ func TestGetPods(t *testing.T) {
containers := make([]*kubecontainer.Container, len(fakeContainers)) containers := make([]*kubecontainer.Container, len(fakeContainers))
for i := range containers { for i := range containers {
fakeContainer := fakeContainers[i] fakeContainer := fakeContainers[i]
c, err := m.toKubeContainer(&runtimeApi.Container{ c, err := m.toKubeContainer(&runtimeapi.Container{
Id: fakeContainer.Id, Id: fakeContainer.Id,
Metadata: fakeContainer.Metadata, Metadata: fakeContainer.Metadata,
State: fakeContainer.State, State: fakeContainer.State,
@@ -336,7 +336,7 @@ func TestGetPods(t *testing.T) {
containers[i] = c containers[i] = c
} }
// Convert fakeSandbox to kubecontainer.Container // Convert fakeSandbox to kubecontainer.Container
sandbox, err := m.sandboxToKubeContainer(&runtimeApi.PodSandbox{ sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
Id: fakeSandbox.Id, Id: fakeSandbox.Id,
Metadata: fakeSandbox.Metadata, Metadata: fakeSandbox.Metadata,
State: fakeSandbox.State, State: fakeSandbox.State,
@@ -393,7 +393,7 @@ func TestGetPodContainerID(t *testing.T) {
fakeSandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod) fakeSandbox, _ := makeAndSetFakePod(t, m, fakeRuntime, pod)
// Convert fakeSandbox to kubecontainer.Container // Convert fakeSandbox to kubecontainer.Container
sandbox, err := m.sandboxToKubeContainer(&runtimeApi.PodSandbox{ sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
Id: fakeSandbox.Id, Id: fakeSandbox.Id,
Metadata: fakeSandbox.Metadata, Metadata: fakeSandbox.Metadata,
State: fakeSandbox.State, State: fakeSandbox.State,
@@ -476,7 +476,7 @@ func TestKillPod(t *testing.T) {
containers := make([]*kubecontainer.Container, len(fakeContainers)) containers := make([]*kubecontainer.Container, len(fakeContainers))
for i := range containers { for i := range containers {
fakeContainer := fakeContainers[i] fakeContainer := fakeContainers[i]
c, err := m.toKubeContainer(&runtimeApi.Container{ c, err := m.toKubeContainer(&runtimeapi.Container{
Id: fakeContainer.Id, Id: fakeContainer.Id,
Metadata: fakeContainer.Metadata, Metadata: fakeContainer.Metadata,
State: fakeContainer.State, State: fakeContainer.State,
@@ -509,10 +509,10 @@ func TestKillPod(t *testing.T) {
assert.Equal(t, 2, len(fakeRuntime.Containers)) assert.Equal(t, 2, len(fakeRuntime.Containers))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes)) assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
for _, sandbox := range fakeRuntime.Sandboxes { for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeApi.PodSandboxState_SANDBOX_NOTREADY, sandbox.GetState()) assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.GetState())
} }
for _, c := range fakeRuntime.Containers { for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeApi.ContainerState_CONTAINER_EXITED, c.GetState()) assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.GetState())
} }
} }
@@ -550,10 +550,10 @@ func TestSyncPod(t *testing.T) {
assert.Equal(t, 2, len(fakeImage.Images)) assert.Equal(t, 2, len(fakeImage.Images))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes)) assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
for _, sandbox := range fakeRuntime.Sandboxes { for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeApi.PodSandboxState_SANDBOX_READY, sandbox.GetState()) assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.GetState())
} }
for _, c := range fakeRuntime.Containers { for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeApi.ContainerState_CONTAINER_RUNNING, c.GetState()) assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.GetState())
} }
} }
@@ -575,11 +575,11 @@ func TestPruneInitContainers(t *testing.T) {
} }
templates := []containerTemplate{ templates := []containerTemplate{
{pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeApi.ContainerState_CONTAINER_EXITED}, {pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeApi.ContainerState_CONTAINER_EXITED}, {pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeApi.ContainerState_CONTAINER_EXITED}, {pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeApi.ContainerState_CONTAINER_EXITED}, {pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
{pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeApi.ContainerState_CONTAINER_EXITED}, {pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
} }
fakes := makeFakeContainers(t, m, templates) fakes := makeFakeContainers(t, m, templates)
fakeRuntime.SetFakeContainers(fakes) fakeRuntime.SetFakeContainers(fakes)
@@ -633,12 +633,12 @@ func TestSyncPodWithInitContainers(t *testing.T) {
// and container with default attempt number 0. // and container with default attempt number 0.
buildContainerID := func(pod *v1.Pod, container v1.Container) string { buildContainerID := func(pod *v1.Pod, container v1.Container) string {
uid := string(pod.UID) uid := string(pod.UID)
sandboxID := apitest.BuildSandboxName(&runtimeApi.PodSandboxMetadata{ sandboxID := apitest.BuildSandboxName(&runtimeapi.PodSandboxMetadata{
Name: &pod.Name, Name: &pod.Name,
Uid: &uid, Uid: &uid,
Namespace: &pod.Namespace, Namespace: &pod.Namespace,
}) })
return apitest.BuildContainerName(&runtimeApi.ContainerMetadata{Name: &container.Name}, sandboxID) return apitest.BuildContainerName(&runtimeapi.ContainerMetadata{Name: &container.Name}, sandboxID)
} }
backOff := flowcontrol.NewBackOff(time.Second, time.Minute) backOff := flowcontrol.NewBackOff(time.Second, time.Minute)

View File

@@ -24,7 +24,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/format"
@@ -59,12 +59,12 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
} }
// generatePodSandboxConfig generates pod sandbox config from v1.Pod. // generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeApi.PodSandboxConfig, error) { func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup // TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871 // Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID) podUID := string(pod.UID)
podSandboxConfig := &runtimeApi.PodSandboxConfig{ podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeApi.PodSandboxMetadata{ Metadata: &runtimeapi.PodSandboxMetadata{
Name: &pod.Name, Name: &pod.Name,
Namespace: &pod.Namespace, Namespace: &pod.Namespace,
Uid: &podUID, Uid: &podUID,
@@ -79,7 +79,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
if err != nil { if err != nil {
return nil, err return nil, err
} }
podSandboxConfig.DnsConfig = &runtimeApi.DNSConfig{ podSandboxConfig.DnsConfig = &runtimeapi.DNSConfig{
Servers: dnsServers, Servers: dnsServers,
Searches: dnsSearches, Searches: dnsSearches,
Options: defaultDNSOptions, Options: defaultDNSOptions,
@@ -96,7 +96,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
podSandboxConfig.LogDirectory = &logDir podSandboxConfig.LogDirectory = &logDir
cgroupParent := "" cgroupParent := ""
portMappings := []*runtimeApi.PortMapping{} portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers { for _, c := range pod.Spec.Containers {
// TODO: use a separate interface to only generate portmappings // TODO: use a separate interface to only generate portmappings
opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, &c, "") opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, &c, "")
@@ -109,7 +109,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
hostPort := int32(port.HostPort) hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort) containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol) protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeApi.PortMapping{ portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: &port.HostIP, HostIp: &port.HostIP,
HostPort: &hostPort, HostPort: &hostPort,
ContainerPort: &containerPort, ContainerPort: &containerPort,
@@ -129,19 +129,19 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp
} }
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod. // generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeApi.LinuxPodSandboxConfig { func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeapi.LinuxPodSandboxConfig {
if pod.Spec.SecurityContext == nil && cgroupParent == "" { if pod.Spec.SecurityContext == nil && cgroupParent == "" {
return nil return nil
} }
lc := &runtimeApi.LinuxPodSandboxConfig{} lc := &runtimeapi.LinuxPodSandboxConfig{}
if cgroupParent != "" { if cgroupParent != "" {
lc.CgroupParent = &cgroupParent lc.CgroupParent = &cgroupParent
} }
if pod.Spec.SecurityContext != nil { if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext sc := pod.Spec.SecurityContext
lc.SecurityContext = &runtimeApi.LinuxSandboxSecurityContext{ lc.SecurityContext = &runtimeapi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeApi.NamespaceOption{ NamespaceOptions: &runtimeapi.NamespaceOption{
HostNetwork: &pod.Spec.HostNetwork, HostNetwork: &pod.Spec.HostNetwork,
HostIpc: &pod.Spec.HostIPC, HostIpc: &pod.Spec.HostIPC,
HostPid: &pod.Spec.HostPID, HostPid: &pod.Spec.HostPID,
@@ -159,7 +159,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, c
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, sc.SupplementalGroups...) lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, sc.SupplementalGroups...)
} }
if sc.SELinuxOptions != nil { if sc.SELinuxOptions != nil {
lc.SecurityContext.SelinuxOptions = &runtimeApi.SELinuxOption{ lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: &sc.SELinuxOptions.User, User: &sc.SELinuxOptions.User,
Role: &sc.SELinuxOptions.Role, Role: &sc.SELinuxOptions.Role,
Type: &sc.SELinuxOptions.Type, Type: &sc.SELinuxOptions.Type,
@@ -172,11 +172,11 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, c
} }
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet. // getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeApi.PodSandbox, error) { func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi.PodSandbox, error) {
var filter *runtimeApi.PodSandboxFilter var filter *runtimeapi.PodSandboxFilter
if !all { if !all {
readyState := runtimeApi.PodSandboxState_SANDBOX_READY readyState := runtimeapi.PodSandboxState_SANDBOX_READY
filter = &runtimeApi.PodSandboxFilter{ filter = &runtimeapi.PodSandboxFilter{
State: &readyState, State: &readyState,
} }
} }
@@ -187,7 +187,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeApi
return nil, err return nil, err
} }
result := []*runtimeApi.PodSandbox{} result := []*runtimeapi.PodSandbox{}
for _, s := range resp { for _, s := range resp {
if !isManagedByKubelet(s.Labels) { if !isManagedByKubelet(s.Labels) {
glog.V(5).Infof("Sandbox %s is not managed by kubelet", kubecontainer.BuildPodFullName( glog.V(5).Infof("Sandbox %s is not managed by kubelet", kubecontainer.BuildPodFullName(
@@ -202,7 +202,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeApi
} }
// determinePodSandboxIP determines the IP address of the given pod sandbox. // determinePodSandboxIP determines the IP address of the given pod sandbox.
func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeApi.PodSandboxStatus) string { func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) string {
if podSandbox.Network == nil { if podSandbox.Network == nil {
glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP") glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
return "" return ""
@@ -217,8 +217,8 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName
// getPodSandboxID gets the sandbox id by podUID and returns ([]sandboxID, error). // getPodSandboxID gets the sandbox id by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to same pod. // Param state could be nil in order to get all sandboxes belonging to same pod.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeApi.PodSandboxState) ([]string, error) { func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
filter := &runtimeApi.PodSandboxFilter{ filter := &runtimeapi.PodSandboxFilter{
State: state, State: state,
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)}, LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
} }
@@ -252,7 +252,7 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string,
return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID)) return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
} }
// TODO: Port is unused for now, but we may need it in the future. // TODO: Port is unused for now, but we may need it in the future.
req := &runtimeApi.PortForwardRequest{ req := &runtimeapi.PortForwardRequest{
PodSandboxId: &sandboxIDs[0], PodSandboxId: &sandboxIDs[0],
} }
resp, err := m.runtimeService.PortForward(req) resp, err := m.runtimeService.PortForward(req)

View File

@@ -23,7 +23,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
) )
@@ -57,7 +57,7 @@ func TestCreatePodSandbox(t *testing.T) {
id, _, err := m.createPodSandbox(pod, 1) id, _, err := m.createPodSandbox(pod, 1)
assert.NoError(t, err) assert.NoError(t, err)
fakeRuntime.AssertCalls([]string{"RunPodSandbox"}) fakeRuntime.AssertCalls([]string{"RunPodSandbox"})
sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeApi.PodSandboxFilter{Id: &id}) sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: &id})
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, len(sandboxes), 1) assert.Equal(t, len(sandboxes), 1)
// TODO Check pod sandbox configuration // TODO Check pod sandbox configuration

View File

@@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
// Package remote containers gRPC implementation of internalApi.RuntimeService // Package remote containers gRPC implementation of internalapi.RuntimeService
// and internalApi.ImageManagerService. // and internalapi.ImageManagerService.
package remote package remote

View File

@@ -21,18 +21,18 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"google.golang.org/grpc" "google.golang.org/grpc"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// RemoteImageService is a gRPC implementation of internalApi.ImageManagerService. // RemoteImageService is a gRPC implementation of internalapi.ImageManagerService.
type RemoteImageService struct { type RemoteImageService struct {
timeout time.Duration timeout time.Duration
imageClient runtimeApi.ImageServiceClient imageClient runtimeapi.ImageServiceClient
} }
// NewRemoteImageService creates a new internalApi.ImageManagerService. // NewRemoteImageService creates a new internalapi.ImageManagerService.
func NewRemoteImageService(addr string, connectionTimout time.Duration) (internalApi.ImageManagerService, error) { func NewRemoteImageService(addr string, connectionTimout time.Duration) (internalapi.ImageManagerService, error) {
glog.V(3).Infof("Connecting to image service %s", addr) glog.V(3).Infof("Connecting to image service %s", addr)
conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimout), grpc.WithDialer(dial)) conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimout), grpc.WithDialer(dial))
if err != nil { if err != nil {
@@ -42,16 +42,16 @@ func NewRemoteImageService(addr string, connectionTimout time.Duration) (interna
return &RemoteImageService{ return &RemoteImageService{
timeout: connectionTimout, timeout: connectionTimout,
imageClient: runtimeApi.NewImageServiceClient(conn), imageClient: runtimeapi.NewImageServiceClient(conn),
}, nil }, nil
} }
// ListImages lists available images. // ListImages lists available images.
func (r *RemoteImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runtimeApi.Image, error) { func (r *RemoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.imageClient.ListImages(ctx, &runtimeApi.ListImagesRequest{ resp, err := r.imageClient.ListImages(ctx, &runtimeapi.ListImagesRequest{
Filter: filter, Filter: filter,
}) })
if err != nil { if err != nil {
@@ -63,11 +63,11 @@ func (r *RemoteImageService) ListImages(filter *runtimeApi.ImageFilter) ([]*runt
} }
// ImageStatus returns the status of the image. // ImageStatus returns the status of the image.
func (r *RemoteImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeApi.Image, error) { func (r *RemoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.imageClient.ImageStatus(ctx, &runtimeApi.ImageStatusRequest{ resp, err := r.imageClient.ImageStatus(ctx, &runtimeapi.ImageStatusRequest{
Image: image, Image: image,
}) })
if err != nil { if err != nil {
@@ -79,11 +79,11 @@ func (r *RemoteImageService) ImageStatus(image *runtimeApi.ImageSpec) (*runtimeA
} }
// PullImage pulls an image with authentication config. // PullImage pulls an image with authentication config.
func (r *RemoteImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtimeApi.AuthConfig) error { func (r *RemoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) error {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
_, err := r.imageClient.PullImage(ctx, &runtimeApi.PullImageRequest{ _, err := r.imageClient.PullImage(ctx, &runtimeapi.PullImageRequest{
Image: image, Image: image,
Auth: auth, Auth: auth,
}) })
@@ -96,11 +96,11 @@ func (r *RemoteImageService) PullImage(image *runtimeApi.ImageSpec, auth *runtim
} }
// RemoveImage removes the image. // RemoveImage removes the image.
func (r *RemoteImageService) RemoveImage(image *runtimeApi.ImageSpec) error { func (r *RemoteImageService) RemoveImage(image *runtimeapi.ImageSpec) error {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
_, err := r.imageClient.RemoveImage(ctx, &runtimeApi.RemoveImageRequest{ _, err := r.imageClient.RemoveImage(ctx, &runtimeapi.RemoveImageRequest{
Image: image, Image: image,
}) })
if err != nil { if err != nil {

View File

@@ -23,19 +23,19 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"google.golang.org/grpc" "google.golang.org/grpc"
internalApi "k8s.io/kubernetes/pkg/kubelet/api" internalapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
utilexec "k8s.io/kubernetes/pkg/util/exec" utilexec "k8s.io/kubernetes/pkg/util/exec"
) )
// RemoteRuntimeService is a gRPC implementation of internalApi.RuntimeService. // RemoteRuntimeService is a gRPC implementation of internalapi.RuntimeService.
type RemoteRuntimeService struct { type RemoteRuntimeService struct {
timeout time.Duration timeout time.Duration
runtimeClient runtimeApi.RuntimeServiceClient runtimeClient runtimeapi.RuntimeServiceClient
} }
// NewRemoteRuntimeService creates a new internalApi.RuntimeService. // NewRemoteRuntimeService creates a new internalapi.RuntimeService.
func NewRemoteRuntimeService(addr string, connectionTimout time.Duration) (internalApi.RuntimeService, error) { func NewRemoteRuntimeService(addr string, connectionTimout time.Duration) (internalapi.RuntimeService, error) {
glog.Infof("Connecting to runtime service %s", addr) glog.Infof("Connecting to runtime service %s", addr)
conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimout), grpc.WithDialer(dial)) conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimout), grpc.WithDialer(dial))
if err != nil { if err != nil {
@@ -45,16 +45,16 @@ func NewRemoteRuntimeService(addr string, connectionTimout time.Duration) (inter
return &RemoteRuntimeService{ return &RemoteRuntimeService{
timeout: connectionTimout, timeout: connectionTimout,
runtimeClient: runtimeApi.NewRuntimeServiceClient(conn), runtimeClient: runtimeapi.NewRuntimeServiceClient(conn),
}, nil }, nil
} }
// Version returns the runtime name, runtime version and runtime API version. // Version returns the runtime name, runtime version and runtime API version.
func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeApi.VersionResponse, error) { func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
typedVersion, err := r.runtimeClient.Version(ctx, &runtimeApi.VersionRequest{ typedVersion, err := r.runtimeClient.Version(ctx, &runtimeapi.VersionRequest{
Version: &apiVersion, Version: &apiVersion,
}) })
if err != nil { if err != nil {
@@ -67,11 +67,11 @@ func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeApi.VersionRe
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state. // the sandbox is in ready state.
func (r *RemoteRuntimeService) RunPodSandbox(config *runtimeApi.PodSandboxConfig) (string, error) { func (r *RemoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.runtimeClient.RunPodSandbox(ctx, &runtimeApi.RunPodSandboxRequest{ resp, err := r.runtimeClient.RunPodSandbox(ctx, &runtimeapi.RunPodSandboxRequest{
Config: config, Config: config,
}) })
if err != nil { if err != nil {
@@ -88,7 +88,7 @@ func (r *RemoteRuntimeService) StopPodSandbox(podSandBoxID string) error {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
_, err := r.runtimeClient.StopPodSandbox(ctx, &runtimeApi.StopPodSandboxRequest{ _, err := r.runtimeClient.StopPodSandbox(ctx, &runtimeapi.StopPodSandboxRequest{
PodSandboxId: &podSandBoxID, PodSandboxId: &podSandBoxID,
}) })
if err != nil { if err != nil {
@@ -105,7 +105,7 @@ func (r *RemoteRuntimeService) RemovePodSandbox(podSandBoxID string) error {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
_, err := r.runtimeClient.RemovePodSandbox(ctx, &runtimeApi.RemovePodSandboxRequest{ _, err := r.runtimeClient.RemovePodSandbox(ctx, &runtimeapi.RemovePodSandboxRequest{
PodSandboxId: &podSandBoxID, PodSandboxId: &podSandBoxID,
}) })
if err != nil { if err != nil {
@@ -117,11 +117,11 @@ func (r *RemoteRuntimeService) RemovePodSandbox(podSandBoxID string) error {
} }
// PodSandboxStatus returns the status of the PodSandbox. // PodSandboxStatus returns the status of the PodSandbox.
func (r *RemoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeApi.PodSandboxStatus, error) { func (r *RemoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeapi.PodSandboxStatus, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.runtimeClient.PodSandboxStatus(ctx, &runtimeApi.PodSandboxStatusRequest{ resp, err := r.runtimeClient.PodSandboxStatus(ctx, &runtimeapi.PodSandboxStatusRequest{
PodSandboxId: &podSandBoxID, PodSandboxId: &podSandBoxID,
}) })
if err != nil { if err != nil {
@@ -133,11 +133,11 @@ func (r *RemoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeAp
} }
// ListPodSandbox returns a list of PodSandboxes. // ListPodSandbox returns a list of PodSandboxes.
func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) { func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.runtimeClient.ListPodSandbox(ctx, &runtimeApi.ListPodSandboxRequest{ resp, err := r.runtimeClient.ListPodSandbox(ctx, &runtimeapi.ListPodSandboxRequest{
Filter: filter, Filter: filter,
}) })
if err != nil { if err != nil {
@@ -149,11 +149,11 @@ func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilte
} }
// CreateContainer creates a new container in the specified PodSandbox. // CreateContainer creates a new container in the specified PodSandbox.
func (r *RemoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeApi.ContainerConfig, sandboxConfig *runtimeApi.PodSandboxConfig) (string, error) { func (r *RemoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.runtimeClient.CreateContainer(ctx, &runtimeApi.CreateContainerRequest{ resp, err := r.runtimeClient.CreateContainer(ctx, &runtimeapi.CreateContainerRequest{
PodSandboxId: &podSandBoxID, PodSandboxId: &podSandBoxID,
Config: config, Config: config,
SandboxConfig: sandboxConfig, SandboxConfig: sandboxConfig,
@@ -171,7 +171,7 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
_, err := r.runtimeClient.StartContainer(ctx, &runtimeApi.StartContainerRequest{ _, err := r.runtimeClient.StartContainer(ctx, &runtimeapi.StartContainerRequest{
ContainerId: &containerID, ContainerId: &containerID,
}) })
if err != nil { if err != nil {
@@ -187,7 +187,7 @@ func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64)
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
_, err := r.runtimeClient.StopContainer(ctx, &runtimeApi.StopContainerRequest{ _, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{
ContainerId: &containerID, ContainerId: &containerID,
Timeout: &timeout, Timeout: &timeout,
}) })
@@ -205,7 +205,7 @@ func (r *RemoteRuntimeService) RemoveContainer(containerID string) error {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
_, err := r.runtimeClient.RemoveContainer(ctx, &runtimeApi.RemoveContainerRequest{ _, err := r.runtimeClient.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{
ContainerId: &containerID, ContainerId: &containerID,
}) })
if err != nil { if err != nil {
@@ -217,11 +217,11 @@ func (r *RemoteRuntimeService) RemoveContainer(containerID string) error {
} }
// ListContainers lists containers by filters. // ListContainers lists containers by filters.
func (r *RemoteRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) { func (r *RemoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.runtimeClient.ListContainers(ctx, &runtimeApi.ListContainersRequest{ resp, err := r.runtimeClient.ListContainers(ctx, &runtimeapi.ListContainersRequest{
Filter: filter, Filter: filter,
}) })
if err != nil { if err != nil {
@@ -233,11 +233,11 @@ func (r *RemoteRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter
} }
// ContainerStatus returns the container status. // ContainerStatus returns the container status.
func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeApi.ContainerStatus, error) { func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeApi.ContainerStatusRequest{ resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{
ContainerId: &containerID, ContainerId: &containerID,
}) })
if err != nil { if err != nil {
@@ -255,7 +255,7 @@ func (r *RemoteRuntimeService) ExecSync(containerID string, cmd []string, timeou
defer cancel() defer cancel()
timeoutSeconds := int64(timeout.Seconds()) timeoutSeconds := int64(timeout.Seconds())
req := &runtimeApi.ExecSyncRequest{ req := &runtimeapi.ExecSyncRequest{
ContainerId: &containerID, ContainerId: &containerID,
Cmd: cmd, Cmd: cmd,
Timeout: &timeoutSeconds, Timeout: &timeoutSeconds,
@@ -278,7 +278,7 @@ func (r *RemoteRuntimeService) ExecSync(containerID string, cmd []string, timeou
} }
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address. // Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (r *RemoteRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) { func (r *RemoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
@@ -292,7 +292,7 @@ func (r *RemoteRuntimeService) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.Ex
} }
// Attach prepares a streaming endpoint to attach to a running container, and returns the address. // Attach prepares a streaming endpoint to attach to a running container, and returns the address.
func (r *RemoteRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) { func (r *RemoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
@@ -306,7 +306,7 @@ func (r *RemoteRuntimeService) Attach(req *runtimeApi.AttachRequest) (*runtimeAp
} }
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (r *RemoteRuntimeService) PortForward(req *runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) { func (r *RemoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
@@ -322,14 +322,14 @@ func (r *RemoteRuntimeService) PortForward(req *runtimeApi.PortForwardRequest) (
// UpdateRuntimeConfig updates the config of a runtime service. The only // UpdateRuntimeConfig updates the config of a runtime service. The only
// update payload currently supported is the pod CIDR assigned to a node, // update payload currently supported is the pod CIDR assigned to a node,
// and the runtime service just proxies it down to the network plugin. // and the runtime service just proxies it down to the network plugin.
func (r *RemoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.RuntimeConfig) error { func (r *RemoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
// Response doesn't contain anything of interest. This translates to an // Response doesn't contain anything of interest. This translates to an
// Event notification to the network plugin, which can't fail, so we're // Event notification to the network plugin, which can't fail, so we're
// really looking to surface destination unreachable. // really looking to surface destination unreachable.
_, err := r.runtimeClient.UpdateRuntimeConfig(ctx, &runtimeApi.UpdateRuntimeConfigRequest{ _, err := r.runtimeClient.UpdateRuntimeConfig(ctx, &runtimeapi.UpdateRuntimeConfigRequest{
RuntimeConfig: runtimeConfig, RuntimeConfig: runtimeConfig,
}) })
@@ -341,11 +341,11 @@ func (r *RemoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeApi.Run
} }
// Status returns the status of the runtime. // Status returns the status of the runtime.
func (r *RemoteRuntimeService) Status() (*runtimeApi.RuntimeStatus, error) { func (r *RemoteRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) {
ctx, cancel := getContextWithTimeout(r.timeout) ctx, cancel := getContextWithTimeout(r.timeout)
defer cancel() defer cancel()
resp, err := r.runtimeClient.Status(ctx, &runtimeApi.StatusRequest{}) resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{})
if err != nil { if err != nil {
glog.Errorf("Status from runtime service failed: %v", err) glog.Errorf("Status from runtime service failed: %v", err)
return nil, err return nil, err

View File

@@ -19,8 +19,8 @@ package rktshim
import ( import (
"time" "time"
kubeletApi "k8s.io/kubernetes/pkg/kubelet/api" kubeletapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// Runtime provides an API for lifecycle, inspection and introspection // Runtime provides an API for lifecycle, inspection and introspection
@@ -31,12 +31,12 @@ type Runtime struct{}
type RuntimeConfig struct{} type RuntimeConfig struct{}
// NewRuntime creates a container.Runtime instance using the Runtime. // NewRuntime creates a container.Runtime instance using the Runtime.
func NewRuntime(RuntimeConfig) (kubeletApi.ContainerManager, error) { func NewRuntime(RuntimeConfig) (kubeletapi.ContainerManager, error) {
return &Runtime{}, nil return &Runtime{}, nil
} }
// CreateContainer creates an app inside the provided pod sandbox and returns the RawContainerID. // CreateContainer creates an app inside the provided pod sandbox and returns the RawContainerID.
func (*Runtime) CreateContainer(string, *runtimeApi.ContainerConfig, *runtimeApi.PodSandboxConfig) (string, error) { func (*Runtime) CreateContainer(string, *runtimeapi.ContainerConfig, *runtimeapi.PodSandboxConfig) (string, error) {
panic("not implemented") panic("not implemented")
} }
@@ -56,12 +56,12 @@ func (*Runtime) RemoveContainer(string) error {
} }
// ListContainers lists out the apps residing inside the pod sandbox using the ContainerFilter. // ListContainers lists out the apps residing inside the pod sandbox using the ContainerFilter.
func (*Runtime) ListContainers(*runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) { func (*Runtime) ListContainers(*runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
panic("not implemented") panic("not implemented")
} }
// ContainerStatus returns the RawContainerStatus of an app inside the pod sandbox. // ContainerStatus returns the RawContainerStatus of an app inside the pod sandbox.
func (*Runtime) ContainerStatus(string) (*runtimeApi.ContainerStatus, error) { func (*Runtime) ContainerStatus(string) (*runtimeapi.ContainerStatus, error) {
panic("not implemented") panic("not implemented")
} }
@@ -72,11 +72,11 @@ func (*Runtime) ExecSync(containerID string, cmd []string, timeout time.Duration
} }
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address. // Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (*Runtime) Exec(*runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) { func (*Runtime) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
panic("not implemented") panic("not implemented")
} }
// Attach prepares a streaming endpoint to attach to a running container, and returns the address. // Attach prepares a streaming endpoint to attach to a running container, and returns the address.
func (*Runtime) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) { func (*Runtime) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
panic("not implemented") panic("not implemented")
} }

View File

@@ -23,8 +23,8 @@ import (
"math/rand" "math/rand"
"time" "time"
kubeletApi "k8s.io/kubernetes/pkg/kubelet/api" kubeletapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils" "k8s.io/kubernetes/pkg/kubelet/util/ioutils"
) )
@@ -61,7 +61,7 @@ type FakeRuntime struct {
type FakeRuntimeConfig struct{} type FakeRuntimeConfig struct{}
func NewFakeRuntime() (kubeletApi.ContainerManager, error) { func NewFakeRuntime() (kubeletapi.ContainerManager, error) {
return &FakeRuntime{Containers: make(containerRegistry)}, nil return &FakeRuntime{Containers: make(containerRegistry)}, nil
} }
@@ -78,23 +78,23 @@ func newCharacterStreams(in io.Reader, out io.Writer, err io.Writer) characterSt
} }
type fakeContainer struct { type fakeContainer struct {
Config *runtimeApi.ContainerConfig Config *runtimeapi.ContainerConfig
Status runtimeApi.ContainerStatus Status runtimeapi.ContainerStatus
State runtimeApi.ContainerState State runtimeapi.ContainerState
Streams characterStreams Streams characterStreams
} }
func (c *fakeContainer) Start() { func (c *fakeContainer) Start() {
c.State = runtimeApi.ContainerState_CONTAINER_RUNNING c.State = runtimeapi.ContainerState_CONTAINER_RUNNING
c.Status.State = &c.State c.Status.State = &c.State
} }
func (c *fakeContainer) Stop() { func (c *fakeContainer) Stop() {
c.State = runtimeApi.ContainerState_CONTAINER_EXITED c.State = runtimeapi.ContainerState_CONTAINER_EXITED
c.Status.State = &c.State c.Status.State = &c.State
@@ -115,7 +115,7 @@ func (c *fakeContainer) Exec(cmd []string, in io.Reader, out, err io.WriteCloser
type containerRegistry map[string]*fakeContainer type containerRegistry map[string]*fakeContainer
func (r *FakeRuntime) CreateContainer(pid string, cfg *runtimeApi.ContainerConfig, sandboxCfg *runtimeApi.PodSandboxConfig) (string, error) { func (r *FakeRuntime) CreateContainer(pid string, cfg *runtimeapi.ContainerConfig, sandboxCfg *runtimeapi.PodSandboxConfig) (string, error) {
// TODO(tmrts): allow customization // TODO(tmrts): allow customization
containerIDLength := 8 containerIDLength := 8
@@ -135,11 +135,11 @@ func (r *FakeRuntime) StartContainer(id string) error {
return ErrContainerNotFound return ErrContainerNotFound
} }
switch c.State { switch c.State {
case runtimeApi.ContainerState_CONTAINER_EXITED: case runtimeapi.ContainerState_CONTAINER_EXITED:
fallthrough fallthrough
case runtimeApi.ContainerState_CONTAINER_CREATED: case runtimeapi.ContainerState_CONTAINER_CREATED:
c.Start() c.Start()
case runtimeApi.ContainerState_CONTAINER_UNKNOWN: case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
// TODO(tmrts): add timeout to Start API or generalize timeout somehow // TODO(tmrts): add timeout to Start API or generalize timeout somehow
//<-time.After(time.Duration(timeout) * time.Second) //<-time.After(time.Duration(timeout) * time.Second)
fallthrough fallthrough
@@ -157,9 +157,9 @@ func (r *FakeRuntime) StopContainer(id string, timeout int64) error {
} }
switch c.State { switch c.State {
case runtimeApi.ContainerState_CONTAINER_RUNNING: case runtimeapi.ContainerState_CONTAINER_RUNNING:
c.State = runtimeApi.ContainerState_CONTAINER_EXITED // This state might not be the best one c.State = runtimeapi.ContainerState_CONTAINER_EXITED // This state might not be the best one
case runtimeApi.ContainerState_CONTAINER_UNKNOWN: case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
<-time.After(time.Duration(timeout) * time.Second) <-time.After(time.Duration(timeout) * time.Second)
fallthrough fallthrough
default: default:
@@ -181,12 +181,12 @@ func (r *FakeRuntime) RemoveContainer(id string) error {
return nil return nil
} }
func (r *FakeRuntime) ListContainers(*runtimeApi.ContainerFilter) ([]*runtimeApi.Container, error) { func (r *FakeRuntime) ListContainers(*runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
list := []*runtimeApi.Container{} list := []*runtimeapi.Container{}
// TODO(tmrts): apply the filter // TODO(tmrts): apply the filter
for _, c := range r.Containers { for _, c := range r.Containers {
list = append(list, &runtimeApi.Container{ list = append(list, &runtimeapi.Container{
Id: c.Status.Id, Id: c.Status.Id,
Metadata: c.Config.Metadata, Metadata: c.Config.Metadata,
Labels: c.Config.Labels, Labels: c.Config.Labels,
@@ -198,10 +198,10 @@ func (r *FakeRuntime) ListContainers(*runtimeApi.ContainerFilter) ([]*runtimeApi
return list, nil return list, nil
} }
func (r *FakeRuntime) ContainerStatus(id string) (*runtimeApi.ContainerStatus, error) { func (r *FakeRuntime) ContainerStatus(id string) (*runtimeapi.ContainerStatus, error) {
c, ok := r.Containers[id] c, ok := r.Containers[id]
if !ok { if !ok {
return &runtimeApi.ContainerStatus{}, ErrContainerNotFound return &runtimeapi.ContainerStatus{}, ErrContainerNotFound
} }
return &c.Status, nil return &c.Status, nil
@@ -214,7 +214,7 @@ func (r *FakeRuntime) ExecSync(containerID string, cmd []string, timeout time.Du
} }
// TODO(tmrts): Validate the assumption that container has to be running for exec to work. // TODO(tmrts): Validate the assumption that container has to be running for exec to work.
if c.State != runtimeApi.ContainerState_CONTAINER_RUNNING { if c.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
return nil, nil, ErrInvalidContainerStateTransition return nil, nil, ErrInvalidContainerStateTransition
} }
@@ -225,16 +225,16 @@ func (r *FakeRuntime) ExecSync(containerID string, cmd []string, timeout time.Du
return stdoutBuffer.Bytes(), stderrBuffer.Bytes(), err return stdoutBuffer.Bytes(), stderrBuffer.Bytes(), err
} }
func (r *FakeRuntime) Exec(req *runtimeApi.ExecRequest) (*runtimeApi.ExecResponse, error) { func (r *FakeRuntime) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
url := "http://" + FakeStreamingHost + ":" + FakeStreamingPort + "/exec/" + req.GetContainerId() url := "http://" + FakeStreamingHost + ":" + FakeStreamingPort + "/exec/" + req.GetContainerId()
return &runtimeApi.ExecResponse{ return &runtimeapi.ExecResponse{
Url: &url, Url: &url,
}, nil }, nil
} }
func (r *FakeRuntime) Attach(req *runtimeApi.AttachRequest) (*runtimeApi.AttachResponse, error) { func (r *FakeRuntime) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
url := "http://" + FakeStreamingHost + ":" + FakeStreamingPort + "/attach/" + req.GetContainerId() url := "http://" + FakeStreamingHost + ":" + FakeStreamingPort + "/attach/" + req.GetContainerId()
return &runtimeApi.AttachResponse{ return &runtimeapi.AttachResponse{
Url: &url, Url: &url,
}, nil }, nil
} }

View File

@@ -19,7 +19,7 @@ package rktshim
import ( import (
"errors" "errors"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// TODO(tmrts): Move these errors to the container API for code re-use. // TODO(tmrts): Move these errors to the container API for code re-use.
@@ -41,21 +41,21 @@ func NewImageStore(ImageStoreConfig) (*ImageStore, error) {
} }
// List lists the images residing in the image store. // List lists the images residing in the image store.
func (*ImageStore) List() ([]runtimeApi.Image, error) { func (*ImageStore) List() ([]runtimeapi.Image, error) {
panic("not implemented") panic("not implemented")
} }
// Pull pulls an image into the image store and uses the given authentication method. // Pull pulls an image into the image store and uses the given authentication method.
func (*ImageStore) Pull(runtimeApi.ImageSpec, runtimeApi.AuthConfig, *runtimeApi.PodSandboxConfig) error { func (*ImageStore) Pull(runtimeapi.ImageSpec, runtimeapi.AuthConfig, *runtimeapi.PodSandboxConfig) error {
panic("not implemented") panic("not implemented")
} }
// Remove removes the image from the image store. // Remove removes the image from the image store.
func (*ImageStore) Remove(runtimeApi.ImageSpec) error { func (*ImageStore) Remove(runtimeapi.ImageSpec) error {
panic("not implemented") panic("not implemented")
} }
// Status returns the status of the image. // Status returns the status of the image.
func (*ImageStore) Status(runtimeApi.ImageSpec) (runtimeApi.Image, error) { func (*ImageStore) Status(runtimeapi.ImageSpec) (runtimeapi.Image, error) {
panic("not implemented") panic("not implemented")
} }

View File

@@ -21,21 +21,21 @@ import (
"reflect" "reflect"
"testing" "testing"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
var ( var (
emptyImgStoreConfig = ImageStoreConfig{} emptyImgStoreConfig = ImageStoreConfig{}
// TODO(tmrts): fill the pod configuration // TODO(tmrts): fill the pod configuration
testPodConfig *runtimeApi.PodSandboxConfig = nil testPodConfig *runtimeapi.PodSandboxConfig = nil
) )
type imageTestCase struct { type imageTestCase struct {
Spec *runtimeApi.ImageSpec Spec *runtimeapi.ImageSpec
ExpectedStatus *runtimeApi.Image ExpectedStatus *runtimeapi.Image
} }
func compareContainerImages(got, expected runtimeApi.Image) error { func compareContainerImages(got, expected runtimeapi.Image) error {
if got.Id != expected.Id { if got.Id != expected.Id {
return fmt.Errorf("mismatching Ids -> expected %q, got %q", got.Id, expected.Id) return fmt.Errorf("mismatching Ids -> expected %q, got %q", got.Id, expected.Id)
} }
@@ -62,16 +62,16 @@ var (
var testImgSpecs = map[string]imageTestCase{ var testImgSpecs = map[string]imageTestCase{
"non-existent-image": { "non-existent-image": {
&runtimeApi.ImageSpec{ &runtimeapi.ImageSpec{
Image: &gibberishStr, Image: &gibberishStr,
}, },
nil, nil,
}, },
"busybox": { "busybox": {
&runtimeApi.ImageSpec{ &runtimeapi.ImageSpec{
Image: &busyboxStr, Image: &busyboxStr,
}, },
&runtimeApi.Image{ &runtimeapi.Image{
Id: nil, Id: nil,
RepoTags: []string{}, RepoTags: []string{},
RepoDigests: []string{}, RepoDigests: []string{},
@@ -80,7 +80,7 @@ var testImgSpecs = map[string]imageTestCase{
}, },
} }
var testAuthConfig = map[string]runtimeApi.AuthConfig{ var testAuthConfig = map[string]runtimeapi.AuthConfig{
"no-auth": {}, "no-auth": {},
} }

View File

@@ -17,8 +17,8 @@ limitations under the License.
package rktshim package rktshim
import ( import (
kubeletApi "k8s.io/kubernetes/pkg/kubelet/api" kubeletapi "k8s.io/kubernetes/pkg/kubelet/api"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime" runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
) )
// PodSandboxManager provides basic operations to create/delete and examine // PodSandboxManager provides basic operations to create/delete and examine
@@ -29,12 +29,12 @@ type PodSandboxManager struct{}
type PodSandboxManagerConfig struct{} type PodSandboxManagerConfig struct{}
// NewPodSandboxManager creates a PodSandboxManager. // NewPodSandboxManager creates a PodSandboxManager.
func NewPodSandboxManager(PodSandboxManagerConfig) (kubeletApi.PodSandboxManager, error) { func NewPodSandboxManager(PodSandboxManagerConfig) (kubeletapi.PodSandboxManager, error) {
return &PodSandboxManager{}, nil return &PodSandboxManager{}, nil
} }
// RunPodSandbox creates and starts a pod sandbox given a pod sandbox configuration. // RunPodSandbox creates and starts a pod sandbox given a pod sandbox configuration.
func (*PodSandboxManager) RunPodSandbox(*runtimeApi.PodSandboxConfig) (string, error) { func (*PodSandboxManager) RunPodSandbox(*runtimeapi.PodSandboxConfig) (string, error) {
panic("not implemented") panic("not implemented")
} }
@@ -49,16 +49,16 @@ func (*PodSandboxManager) RemovePodSandbox(string) error {
} }
// PodSandboxStatus queries the status of the pod sandbox. // PodSandboxStatus queries the status of the pod sandbox.
func (*PodSandboxManager) PodSandboxStatus(string) (*runtimeApi.PodSandboxStatus, error) { func (*PodSandboxManager) PodSandboxStatus(string) (*runtimeapi.PodSandboxStatus, error) {
panic("not implemented") panic("not implemented")
} }
// ListPodSandbox lists existing sandboxes, filtered by the PodSandboxFilter. // ListPodSandbox lists existing sandboxes, filtered by the PodSandboxFilter.
func (*PodSandboxManager) ListPodSandbox(*runtimeApi.PodSandboxFilter) ([]*runtimeApi.PodSandbox, error) { func (*PodSandboxManager) ListPodSandbox(*runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
panic("not implemented") panic("not implemented")
} }
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (*PodSandboxManager) PortForward(*runtimeApi.PortForwardRequest) (*runtimeApi.PortForwardResponse, error) { func (*PodSandboxManager) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
panic("not implemented") panic("not implemented")
} }

View File

@@ -19,7 +19,7 @@ package cache
import ( import (
"time" "time"
expirationCache "k8s.io/kubernetes/pkg/client/cache" expirationcache "k8s.io/kubernetes/pkg/client/cache"
) )
// ObjectCache is a simple wrapper of expiration cache that // ObjectCache is a simple wrapper of expiration cache that
@@ -27,7 +27,7 @@ import (
// 2. has an updater to get value directly if it is expired // 2. has an updater to get value directly if it is expired
// 3. then update the cache // 3. then update the cache
type ObjectCache struct { type ObjectCache struct {
cache expirationCache.Store cache expirationcache.Store
updater func() (interface{}, error) updater func() (interface{}, error)
} }
@@ -42,7 +42,7 @@ type objectEntry struct {
func NewObjectCache(f func() (interface{}, error), ttl time.Duration) *ObjectCache { func NewObjectCache(f func() (interface{}, error), ttl time.Duration) *ObjectCache {
return &ObjectCache{ return &ObjectCache{
updater: f, updater: f,
cache: expirationCache.NewTTLStore(stringKeyFunc, ttl), cache: expirationcache.NewTTLStore(stringKeyFunc, ttl),
} }
} }

View File

@@ -21,7 +21,7 @@ import (
"testing" "testing"
"time" "time"
expirationCache "k8s.io/kubernetes/pkg/client/cache" expirationcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/clock"
) )
@@ -32,11 +32,11 @@ type testObject struct {
// A fake objectCache for unit test. // A fake objectCache for unit test.
func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock clock.Clock) *ObjectCache { func NewFakeObjectCache(f func() (interface{}, error), ttl time.Duration, clock clock.Clock) *ObjectCache {
ttlPolicy := &expirationCache.TTLPolicy{Ttl: ttl, Clock: clock} ttlPolicy := &expirationcache.TTLPolicy{Ttl: ttl, Clock: clock}
deleteChan := make(chan string, 1) deleteChan := make(chan string, 1)
return &ObjectCache{ return &ObjectCache{
updater: f, updater: f,
cache: expirationCache.NewFakeExpirationStore(stringKeyFunc, deleteChan, ttlPolicy, clock), cache: expirationcache.NewFakeExpirationStore(stringKeyFunc, deleteChan, ttlPolicy, clock),
} }
} }

View File

@@ -30,7 +30,7 @@ import (
"syscall" "syscall"
"github.com/golang/glog" "github.com/golang/glog"
utilExec "k8s.io/kubernetes/pkg/util/exec" utilexec "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
) )
@@ -332,9 +332,9 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
cmd := mounter.Runner.Command("fsck", args...) cmd := mounter.Runner.Command("fsck", args...)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
ee, isExitError := err.(utilExec.ExitError) ee, isExitError := err.(utilexec.ExitError)
switch { switch {
case err == utilExec.ErrExecutableNotFound: case err == utilexec.ErrExecutableNotFound:
glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.") glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
case isExitError && ee.ExitStatus() == fsckErrorsCorrected: case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
glog.Infof("Device %s has errors which were corrected by fsck.", source) glog.Infof("Device %s has errors which were corrected by fsck.", source)

View File

@@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
flockerApi "github.com/clusterhq/flocker-go" flockerapi "github.com/clusterhq/flocker-go"
) )
// This is the primary entrypoint for volume plugins. // This is the primary entrypoint for volume plugins.
@@ -50,7 +50,7 @@ type flockerVolume struct {
// dataset uuid // dataset uuid
datasetUUID string datasetUUID string
//pod *v1.Pod //pod *v1.Pod
flockerClient flockerApi.Clientable flockerClient flockerapi.Clientable
manager volumeManager manager volumeManager
plugin *flockerPlugin plugin *flockerPlugin
mounter mount.Interface mounter mount.Interface
@@ -229,7 +229,7 @@ func (b *flockerVolumeMounter) SetUp(fsGroup *int64) error {
// newFlockerClient uses environment variables and pod attributes to return a // newFlockerClient uses environment variables and pod attributes to return a
// flocker client capable of talking with the Flocker control service. // flocker client capable of talking with the Flocker control service.
func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerApi.Client, error) { func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerapi.Client, error) {
host := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_HOST", defaultHost) host := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_HOST", defaultHost)
port, err := env.GetEnvAsIntOrFallback("FLOCKER_CONTROL_SERVICE_PORT", defaultPort) port, err := env.GetEnvAsIntOrFallback("FLOCKER_CONTROL_SERVICE_PORT", defaultPort)
@@ -240,11 +240,11 @@ func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerApi.Client, err
keyPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE", defaultClientKeyFile) keyPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE", defaultClientKeyFile)
certPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE", defaultClientCertFile) certPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE", defaultClientCertFile)
c, err := flockerApi.NewClient(host, port, hostIP, caCertPath, keyPath, certPath) c, err := flockerapi.NewClient(host, port, hostIP, caCertPath, keyPath, certPath)
return c, err return c, err
} }
func (b *flockerVolumeMounter) newFlockerClient() (*flockerApi.Client, error) { func (b *flockerVolumeMounter) newFlockerClient() (*flockerapi.Client, error) {
hostIP, err := b.plugin.host.GetHostIP() hostIP, err := b.plugin.host.GetHostIP()
if err != nil { if err != nil {

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing" volumetest "k8s.io/kubernetes/pkg/volume/testing"
flockerApi "github.com/clusterhq/flocker-go" flockerapi "github.com/clusterhq/flocker-go"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -37,14 +37,14 @@ const datasetOneID = "11111111-1111-1111-1111-111111111100"
const nodeOneID = "11111111-1111-1111-1111-111111111111" const nodeOneID = "11111111-1111-1111-1111-111111111111"
const nodeTwoID = "22222222-2222-2222-2222-222222222222" const nodeTwoID = "22222222-2222-2222-2222-222222222222"
var _ flockerApi.Clientable = &fakeFlockerClient{} var _ flockerapi.Clientable = &fakeFlockerClient{}
type fakeFlockerClient struct { type fakeFlockerClient struct {
DatasetID string DatasetID string
Primary string Primary string
Deleted bool Deleted bool
Metadata map[string]string Metadata map[string]string
Nodes []flockerApi.NodeState Nodes []flockerapi.NodeState
Error error Error error
} }
@@ -54,7 +54,7 @@ func newFakeFlockerClient() *fakeFlockerClient {
Primary: nodeOneID, Primary: nodeOneID,
Deleted: false, Deleted: false,
Metadata: map[string]string{"Name": "dataset-one"}, Metadata: map[string]string{"Name": "dataset-one"},
Nodes: []flockerApi.NodeState{ Nodes: []flockerapi.NodeState{
{ {
Host: "1.2.3.4", Host: "1.2.3.4",
UUID: nodeOneID, UUID: nodeOneID,
@@ -67,13 +67,13 @@ func newFakeFlockerClient() *fakeFlockerClient {
} }
} }
func (c *fakeFlockerClient) CreateDataset(options *flockerApi.CreateDatasetOptions) (*flockerApi.DatasetState, error) { func (c *fakeFlockerClient) CreateDataset(options *flockerapi.CreateDatasetOptions) (*flockerapi.DatasetState, error) {
if c.Error != nil { if c.Error != nil {
return nil, c.Error return nil, c.Error
} }
return &flockerApi.DatasetState{ return &flockerapi.DatasetState{
DatasetID: c.DatasetID, DatasetID: c.DatasetID,
}, nil }, nil
} }
@@ -84,8 +84,8 @@ func (c *fakeFlockerClient) DeleteDataset(datasetID string) error {
return nil return nil
} }
func (c *fakeFlockerClient) GetDatasetState(datasetID string) (*flockerApi.DatasetState, error) { func (c *fakeFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
return &flockerApi.DatasetState{}, nil return &flockerapi.DatasetState{}, nil
} }
func (c *fakeFlockerClient) GetDatasetID(metaName string) (datasetID string, err error) { func (c *fakeFlockerClient) GetDatasetID(metaName string) (datasetID string, err error) {
@@ -99,12 +99,12 @@ func (c *fakeFlockerClient) GetPrimaryUUID() (primaryUUID string, err error) {
return return
} }
func (c *fakeFlockerClient) ListNodes() (nodes []flockerApi.NodeState, err error) { func (c *fakeFlockerClient) ListNodes() (nodes []flockerapi.NodeState, err error) {
return c.Nodes, nil return c.Nodes, nil
} }
func (c *fakeFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerApi.DatasetState, error) { func (c *fakeFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
return &flockerApi.DatasetState{}, nil return &flockerapi.DatasetState{}, nil
} }
type fakeFlockerUtil struct { type fakeFlockerUtil struct {
@@ -301,7 +301,7 @@ func TestIsReadOnly(t *testing.T) {
type mockFlockerClient struct { type mockFlockerClient struct {
datasetID, primaryUUID, path string datasetID, primaryUUID, path string
datasetState *flockerApi.DatasetState datasetState *flockerapi.DatasetState
} }
func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mockFlockerClient { func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mockFlockerClient {
@@ -309,7 +309,7 @@ func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mock
datasetID: mockDatasetID, datasetID: mockDatasetID,
primaryUUID: mockPrimaryUUID, primaryUUID: mockPrimaryUUID,
path: mockPath, path: mockPath,
datasetState: &flockerApi.DatasetState{ datasetState: &flockerapi.DatasetState{
Path: mockPath, Path: mockPath,
DatasetID: mockDatasetID, DatasetID: mockDatasetID,
Primary: mockPrimaryUUID, Primary: mockPrimaryUUID,
@@ -317,10 +317,10 @@ func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mock
} }
} }
func (m mockFlockerClient) CreateDataset(metaName string) (*flockerApi.DatasetState, error) { func (m mockFlockerClient) CreateDataset(metaName string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil return m.datasetState, nil
} }
func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerApi.DatasetState, error) { func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil return m.datasetState, nil
} }
func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) { func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
@@ -329,7 +329,7 @@ func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
func (m mockFlockerClient) GetPrimaryUUID() (string, error) { func (m mockFlockerClient) GetPrimaryUUID() (string, error) {
return m.primaryUUID, nil return m.primaryUUID, nil
} }
func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerApi.DatasetState, error) { func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil return m.datasetState, nil
} }

View File

@@ -24,7 +24,7 @@ import (
"k8s.io/kubernetes/pkg/util/rand" "k8s.io/kubernetes/pkg/util/rand"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
flockerApi "github.com/clusterhq/flocker-go" flockerapi "github.com/clusterhq/flocker-go"
"github.com/golang/glog" "github.com/golang/glog"
) )
@@ -75,7 +75,7 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID
requestBytes := capacity.Value() requestBytes := capacity.Value()
volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
createOptions := &flockerApi.CreateDatasetOptions{ createOptions := &flockerapi.CreateDatasetOptions{
MaximumSize: requestBytes, MaximumSize: requestBytes,
Metadata: map[string]string{ Metadata: map[string]string{
"type": "k8s-dynamic-prov", "type": "k8s-dynamic-prov",

Some files were not shown because too many files have changed in this diff Show More