Merge pull request #34905 from ingvagabund/client-to-clientset
Automatic merge from submit-queue

Replace client with clientset

Replace client with clientset in some places

Fixes: #34637
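As a quick orientation before the diff: the pattern applied throughout is to drop the legacy unversioned client (client "k8s.io/kubernetes/pkg/client/unversioned") and route everything through the generated internal clientset, whose typed groups hang off Core() and whose raw REST access moves to Core().RESTClient(). The sketch below is not code from this PR; the package name, helper name, and flow are illustrative, but the constructor and call chains are the ones the diff itself uses (internalclientset.NewForConfig, Core().Pods(...).List, Core().RESTClient()...DoRaw()).

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// listKubeSystemPods is a hypothetical helper showing the clientset-based
// replacements for the old unversioned-client calls.
func listKubeSystemPods(config *restclient.Config) error {
	// Old: c, err := loadClientFromConfig(config); c.Pods(ns).List(...)
	// New: one internal clientset; typed groups are reached via Core().
	cs, err := internalclientset.NewForConfig(config)
	if err != nil {
		return err
	}
	pods, err := cs.Core().Pods("kube-system").List(api.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("kube-system has %d pods\n", len(pods.Items))

	// Raw requests move from c.Get()/c.Post() to Core().RESTClient().
	body, err := cs.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes from /metrics\n", len(body))
	return nil
}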
@@ -47,7 +47,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
     var stdout, stderr bytes.Buffer
    var stdin io.Reader
    tty := false
-   req := f.Client.RESTClient.Post().
+   req := f.ClientSet.Core().RESTClient().Post().
        Resource("pods").
        Name(podName).
        Namespace(f.Namespace.Name).
@@ -39,7 +39,6 @@ import (
    "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/client/typed/dynamic"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/metrics"
@@ -61,9 +60,6 @@ const (
 type Framework struct {
    BaseName string

-   // Client is manually created and should not be used unless absolutely necessary. Use ClientSet_1_5
-   // where possible.
-   Client *client.Client
    // ClientSet uses internal objects, you should use ClientSet_1_5 where possible.
    ClientSet internalclientset.Interface

@@ -134,12 +130,12 @@ func NewDefaultGroupVersionFramework(baseName string, groupVersion unversioned.G
    return f
 }

-func NewFramework(baseName string, options FrameworkOptions, client *client.Client) *Framework {
+func NewFramework(baseName string, options FrameworkOptions, client internalclientset.Interface) *Framework {
    f := &Framework{
        BaseName: baseName,
        AddonResourceConstraints: make(map[string]ResourceConstraint),
        options: options,
-       Client: client,
+       ClientSet: client,
    }

    BeforeEach(f.BeforeEach)
@@ -185,7 +181,7 @@ func (f *Framework) BeforeEach() {
    // The fact that we need this feels like a bug in ginkgo.
    // https://github.com/onsi/ginkgo/issues/222
    f.cleanupHandle = AddCleanupAction(f.AfterEach)
-   if f.Client == nil {
+   if f.ClientSet == nil {
        By("Creating a kubernetes client")
        config, err := LoadConfig()
        Expect(err).NotTo(HaveOccurred())
@@ -197,9 +193,6 @@ func (f *Framework) BeforeEach() {
        if TestContext.KubeAPIContentType != "" {
            config.ContentType = TestContext.KubeAPIContentType
        }
-       c, err := loadClientFromConfig(config)
-       Expect(err).NotTo(HaveOccurred())
-       f.Client = c
        f.ClientSet, err = internalclientset.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
@@ -239,14 +232,14 @@ func (f *Framework) BeforeEach() {

    if TestContext.VerifyServiceAccount {
        By("Waiting for a default service account to be provisioned in namespace")
-       err = WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name)
+       err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
        Expect(err).NotTo(HaveOccurred())
    } else {
        Logf("Skipping waiting for service account")
    }

    if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
-       f.gatherer, err = NewResourceUsageGatherer(f.Client, ResourceGathererOptions{
+       f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
            inKubemark: ProviderIs("kubemark"),
            masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
        })
@@ -261,7 +254,7 @@ func (f *Framework) BeforeEach() {
        f.logsSizeWaitGroup = sync.WaitGroup{}
        f.logsSizeWaitGroup.Add(1)
        f.logsSizeCloseChannel = make(chan bool)
-       f.logsSizeVerifier = NewLogsVerifier(f.Client, f.ClientSet, f.logsSizeCloseChannel)
+       f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
        go func() {
            f.logsSizeVerifier.Run()
            f.logsSizeWaitGroup.Done()
@@ -326,7 +319,7 @@ func (f *Framework) AfterEach() {
            if f.NamespaceDeletionTimeout != 0 {
                timeout = f.NamespaceDeletionTimeout
            }
-           if err := deleteNS(f.Client, f.ClientPool, ns.Name, timeout); err != nil {
+           if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil {
                if !apierrs.IsNotFound(err) {
                    nsDeletionErrors[ns.Name] = err
                } else {
@@ -348,7 +341,7 @@ func (f *Framework) AfterEach() {
        // Paranoia-- prevent reuse!
        f.Namespace = nil
        f.FederationNamespace = nil
-       f.Client = nil
+       f.ClientSet = nil
        f.namespacesToDelete = nil

        // if we had errors deleting, report them now.
@@ -376,18 +369,18 @@ func (f *Framework) AfterEach() {
    // Print events if the test failed.
    if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
        // Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
-       DumpAllNamespaceInfo(f.Client, f.ClientSet_1_5, f.Namespace.Name)
+       DumpAllNamespaceInfo(f.ClientSet, f.ClientSet_1_5, f.Namespace.Name)
        By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
-       LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
+       LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
        if f.federated {
            // Dump federation events in federation namespace.
            DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) {
                return f.FederationClientset_1_5.Core().Events(ns).List(opts)
            }, f.FederationNamespace.Name)
            // Print logs of federation control plane pods (federation-apiserver and federation-controller-manager)
-           LogPodsWithLabels(f.Client, "federation", map[string]string{"app": "federated-cluster"}, Logf)
+           LogPodsWithLabels(f.ClientSet, "federation", map[string]string{"app": "federated-cluster"}, Logf)
            // Print logs of kube-dns pod
-           LogPodsWithLabels(f.Client, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf)
+           LogPodsWithLabels(f.ClientSet, "kube-system", map[string]string{"k8s-app": "kube-dns"}, Logf)
        }
    }

@@ -407,7 +400,7 @@ func (f *Framework) AfterEach() {
    if TestContext.GatherMetricsAfterTest {
        By("Gathering metrics")
        // TODO: enable Scheduler and ControllerManager metrics grabbing when Master's Kubelet will be registered.
-       grabber, err := metrics.NewMetricsGrabber(f.Client, true, false, false, true)
+       grabber, err := metrics.NewMetricsGrabber(f.ClientSet, true, false, false, true)
        if err != nil {
            Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
        } else {
@@ -441,7 +434,7 @@ func (f *Framework) AfterEach() {
    // Check whether all nodes are ready after the test.
    // This is explicitly done at the very end of the test, to avoid
    // e.g. not removing namespace in case of this failure.
-   if err := AllNodesReady(f.Client, 3*time.Minute); err != nil {
+   if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
        Failf("All nodes should be ready after test, %v", err)
    }
 }
@@ -451,7 +444,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
    if createTestingNS == nil {
        createTestingNS = CreateTestingNS
    }
-   ns, err := createTestingNS(baseName, f.Client, labels)
+   ns, err := createTestingNS(baseName, f.ClientSet, labels)
    if err == nil {
        f.namespacesToDelete = append(f.namespacesToDelete, ns)
    }
@@ -483,29 +476,29 @@ func (f *Framework) createFederationNamespace(baseName string) (*v1.Namespace, e

 // WaitForPodTerminated waits for the pod to be terminated with the given reason.
 func (f *Framework) WaitForPodTerminated(podName, reason string) error {
-   return waitForPodTerminatedInNamespace(f.Client, podName, reason, f.Namespace.Name)
+   return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
 }

 // WaitForPodRunning waits for the pod to run in the namespace.
 func (f *Framework) WaitForPodRunning(podName string) error {
-   return WaitForPodNameRunningInNamespace(f.Client, podName, f.Namespace.Name)
+   return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
 }

 // WaitForPodReady waits for the pod to flip to ready in the namespace.
 func (f *Framework) WaitForPodReady(podName string) error {
-   return waitTimeoutForPodReadyInNamespace(f.Client, podName, f.Namespace.Name, "", PodStartTimeout)
+   return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, "", PodStartTimeout)
 }

 // WaitForPodRunningSlow waits for the pod to run in the namespace.
 // It has a longer timeout then WaitForPodRunning (util.slowPodStartTimeout).
 func (f *Framework) WaitForPodRunningSlow(podName string) error {
-   return waitForPodRunningInNamespaceSlow(f.Client, podName, f.Namespace.Name, "")
+   return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name, "")
 }

 // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
 // success or failure.
 func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
-   return WaitForPodNoLongerRunningInNamespace(f.Client, podName, f.Namespace.Name, "")
+   return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name, "")
 }

 // TestContainerOutput runs the given pod in the given namespace and waits
@@ -528,7 +521,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
    for {
        // TODO: Endpoints client should take a field selector so we
        // don't have to list everything.
-       list, err := f.Client.Endpoints(f.Namespace.Name).List(api.ListOptions{})
+       list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(api.ListOptions{})
        if err != nil {
            return err
        }
@@ -547,7 +540,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
            FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector(),
            ResourceVersion: rv,
        }
-       w, err := f.Client.Endpoints(f.Namespace.Name).Watch(options)
+       w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
        if err != nil {
            return err
        }
@@ -613,7 +606,7 @@ func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int,
    theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
    f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
    if block {
-       err = testutils.WaitForPodsWithLabelRunning(f.Client, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
+       err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
    }
    return err, theService
 }
@@ -641,7 +634,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
        }
    }
    Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
-   service, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
+   service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{
        ObjectMeta: api.ObjectMeta{
            Name: "service-for-" + appName,
            Labels: map[string]string{
@@ -667,7 +660,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
        // one per node, but no more than maxCount.
        if i <= maxCount {
            Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
-           _, err := f.Client.Pods(f.Namespace.Name).Create(&api.Pod{
+           _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&api.Pod{
                ObjectMeta: api.ObjectMeta{
                    Name: fmt.Sprintf(appName+"-pod-%v", i),
                    Labels: labels,
@@ -852,14 +845,14 @@ type PodStateVerification struct {
 }

 type ClusterVerification struct {
-   client *client.Client
+   client internalclientset.Interface
    namespace *api.Namespace // pointer rather than string, since ns isn't created until before each.
    podState PodStateVerification
 }

 func (f *Framework) NewClusterVerification(filter PodStateVerification) *ClusterVerification {
    return &ClusterVerification{
-       f.Client,
+       f.ClientSet,
        f.Namespace,
        filter,
    }
@@ -894,7 +887,7 @@ func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool {
 }

 // filterLabels returns a list of pods which have labels.
-func filterLabels(selectors map[string]string, cli *client.Client, ns string) (*api.PodList, error) {
+func filterLabels(selectors map[string]string, cli internalclientset.Interface, ns string) (*api.PodList, error) {
    var err error
    var selector labels.Selector
    var pl *api.PodList
@@ -903,9 +896,9 @@ func filterLabels(selectors map[string]string, cli *client.Client, ns string) (*
    if len(selectors) > 0 {
        selector = labels.SelectorFromSet(labels.Set(selectors))
        options := api.ListOptions{LabelSelector: selector}
-       pl, err = cli.Pods(ns).List(options)
+       pl, err = cli.Core().Pods(ns).List(options)
    } else {
-       pl, err = cli.Pods(ns).List(api.ListOptions{})
+       pl, err = cli.Core().Pods(ns).List(api.ListOptions{})
    }
    return pl, err
 }
@@ -913,7 +906,7 @@ func filterLabels(selectors map[string]string, cli *client.Client, ns string) (*
 // filter filters pods which pass a filter. It can be used to compose
 // the more useful abstractions like ForEach, WaitFor, and so on, which
 // can be used directly by tests.
-func (p *PodStateVerification) filter(c *client.Client, namespace *api.Namespace) ([]api.Pod, error) {
+func (p *PodStateVerification) filter(c internalclientset.Interface, namespace *api.Namespace) ([]api.Pod, error) {
    if len(p.ValidPhases) == 0 || namespace == nil {
        panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
    }
@@ -30,7 +30,7 @@ import (
    cadvisorapi "github.com/google/cadvisor/info/v1"
    "github.com/prometheus/common/model"
    "k8s.io/kubernetes/pkg/api"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
    kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
    kubeletstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
@@ -63,7 +63,7 @@ func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].

 // If a apiserver client is passed in, the function will try to get kubelet metrics from metrics grabber;
 // or else, the function will try to get kubelet metrics directly from the node.
-func getKubeletMetricsFromNode(c *client.Client, nodeName string) (metrics.KubeletMetrics, error) {
+func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
    if c == nil {
        return metrics.GrabKubeletMetricsWithoutProxy(nodeName)
    }
@@ -76,7 +76,7 @@ func getKubeletMetricsFromNode(c *client.Client, nodeName string) (metrics.Kubel

 // getKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims
 // the subsystem prefix.
-func getKubeletMetrics(c *client.Client, nodeName string) (metrics.KubeletMetrics, error) {
+func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
    ms, err := getKubeletMetricsFromNode(c, nodeName)
    if err != nil {
        return metrics.KubeletMetrics{}, err
@@ -138,7 +138,7 @@ func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {

 // RuntimeOperationMonitor is the tool getting and parsing docker operation metrics.
 type RuntimeOperationMonitor struct {
-   client *client.Client
+   client clientset.Interface
    nodesRuntimeOps map[string]NodeRuntimeOperationErrorRate
 }

@@ -152,12 +152,12 @@ type RuntimeOperationErrorRate struct {
    TimeoutRate float64
 }

-func NewRuntimeOperationMonitor(c *client.Client) *RuntimeOperationMonitor {
+func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
    m := &RuntimeOperationMonitor{
        client: c,
        nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
    }
-   nodes, err := m.client.Nodes().List(api.ListOptions{})
+   nodes, err := m.client.Core().Nodes().List(api.ListOptions{})
    if err != nil {
        Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
    }
@@ -224,7 +224,7 @@ func FormatRuntimeOperationErrorRate(nodesResult map[string]NodeRuntimeOperation
 }

 // getNodeRuntimeOperationErrorRate gets runtime operation error rate from specified node.
-func getNodeRuntimeOperationErrorRate(c *client.Client, node string) (NodeRuntimeOperationErrorRate, error) {
+func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) {
    result := make(NodeRuntimeOperationErrorRate)
    ms, err := getKubeletMetrics(c, node)
    if err != nil {
@@ -256,7 +256,7 @@ func getNodeRuntimeOperationErrorRate(c *client.Client, node string) (NodeRuntim
 }

 // HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics.
-func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
+func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
    ms, err := getKubeletMetrics(c, nodeName)
    if err != nil {
        return KubeletLatencyMetrics{}, err
@@ -278,19 +278,19 @@ func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nod
 // in the returned ContainerInfo is subject to the requirements in statsRequest.
 // TODO: This function uses the deprecated kubelet stats API; it should be
 // removed.
-func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) {
+func getContainerInfo(c clientset.Interface, nodeName string, req *kubeletstats.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) {
    reqBody, err := json.Marshal(req)
    if err != nil {
        return nil, err
    }
-   subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
+   subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
    if err != nil {
        return nil, err
    }

    var data []byte
    if subResourceProxyAvailable {
-       data, err = c.Post().
+       data, err = c.Core().RESTClient().Post().
            Resource("nodes").
            SubResource("proxy").
            Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -300,7 +300,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats
            Do().Raw()

    } else {
-       data, err = c.Post().
+       data, err = c.Core().RESTClient().Post().
            Prefix("proxy").
            Resource("nodes").
            Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -344,7 +344,7 @@ func getContainerInfo(c *client.Client, nodeName string, req *kubeletstats.Stats
 // TODO: This function relies on the deprecated kubelet stats API and should be
 // removed and/or rewritten.
 func getOneTimeResourceUsageOnNode(
-   c *client.Client,
+   c clientset.Interface,
    nodeName string,
    cpuInterval time.Duration,
    containerNames func() []string,
@@ -400,15 +400,15 @@ func getOneTimeResourceUsageOnNode(
    return usageMap, nil
 }

-func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, error) {
-   subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c)
+func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
+   subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
    if err != nil {
        return nil, err
    }

    var data []byte
    if subResourceProxyAvailable {
-       data, err = c.Get().
+       data, err = c.Core().RESTClient().Get().
            Resource("nodes").
            SubResource("proxy").
            Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -417,7 +417,7 @@ func getNodeStatsSummary(c *client.Client, nodeName string) (*stats.Summary, err
            Do().Raw()

    } else {
-       data, err = c.Get().
+       data, err = c.Core().RESTClient().Get().
            Prefix("proxy").
            Resource("nodes").
            Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
@@ -515,7 +515,7 @@ type usageDataPerContainer struct {
    memWorkSetData []uint64
 }

-func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) {
+func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
    client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
    if err != nil {
        return "", err
@@ -531,7 +531,7 @@ func GetKubeletHeapStats(c *client.Client, nodeName string) (string, error) {
    return strings.Join(lines[len(lines)-numLines:], "\n"), nil
 }

-func PrintAllKubeletPods(c *client.Client, nodeName string) {
+func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
    podList, err := GetKubeletPods(c, nodeName)
    if err != nil {
        Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
@@ -565,13 +565,13 @@ type resourceCollector struct {
    lock sync.RWMutex
    node string
    containers []string
-   client *client.Client
+   client clientset.Interface
    buffers map[string][]*ContainerResourceUsage
    pollingInterval time.Duration
    stopCh chan struct{}
 }

-func newResourceCollector(c *client.Client, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
+func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
    buffers := make(map[string][]*ContainerResourceUsage)
    return &resourceCollector{
        node: nodeName,
@@ -679,13 +679,13 @@ func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]f

 // ResourceMonitor manages a resourceCollector per node.
 type ResourceMonitor struct {
-   client *client.Client
+   client clientset.Interface
    containers []string
    pollingInterval time.Duration
    collectors map[string]*resourceCollector
 }

-func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
+func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
    return &ResourceMonitor{
        containers: containerNames,
        client: c,
@@ -695,7 +695,7 @@ func NewResourceMonitor(c *client.Client, containerNames []string, pollingInterv

 func (r *ResourceMonitor) Start() {
    // It should be OK to monitor unschedulable Nodes
-   nodes, err := r.client.Nodes().List(api.ListOptions{})
+   nodes, err := r.client.Core().Nodes().List(api.ListOptions{})
    if err != nil {
        Failf("ResourceMonitor: unable to get list of nodes: %v", err)
    }
@@ -26,7 +26,6 @@ import (
    "time"

    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
 )

 const (
@@ -65,8 +64,7 @@ type LogSizeGatherer struct {
 // LogsSizeVerifier gathers data about log files sizes from master and node machines.
 // It oversees a <workersNo> workers which do the gathering.
 type LogsSizeVerifier struct {
-   client *client.Client
-   clientset clientset.Interface
+   client clientset.Interface
    stopChannel chan bool
    // data stores LogSizeData groupped per IP and log_path
    data *LogsSizeData
@@ -144,8 +142,8 @@ func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int
 }

 // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
-func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
-   nodeAddresses, err := NodeSSHHosts(cs)
+func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
+   nodeAddresses, err := NodeSSHHosts(c)
    ExpectNoError(err)
    masterAddress := GetMasterHost() + ":22"

@@ -154,7 +152,6 @@ func NewLogsVerifier(c *client.Client, cs clientset.Interface, stopChannel chan

    verifier := &LogsSizeVerifier{
        client: c,
-       clientset: cs,
        stopChannel: stopChannel,
        data: prepareData(masterAddress, nodeAddresses),
        masterAddress: masterAddress,
@@ -28,7 +28,7 @@ import (
    "time"

    "k8s.io/kubernetes/pkg/api"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/master/ports"
    "k8s.io/kubernetes/pkg/metrics"
    "k8s.io/kubernetes/pkg/util/sets"
@@ -205,7 +205,7 @@ func setQuantile(metric *LatencyMetric, quantile float64, latency time.Duration)
    }
 }

-func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {
+func readLatencyMetrics(c clientset.Interface) (APIResponsiveness, error) {
    var a APIResponsiveness

    body, err := getMetrics(c)
@@ -247,7 +247,7 @@ func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {

 // Prints top five summary metrics for request types with latency and returns
 // number of such request types above threshold.
-func HighLatencyRequests(c *client.Client) (int, error) {
+func HighLatencyRequests(c clientset.Interface) (int, error) {
    metrics, err := readLatencyMetrics(c)
    if err != nil {
        return 0, err
@@ -297,9 +297,9 @@ func VerifyPodStartupLatency(latency PodStartupLatency) error {
 }

 // Resets latency metrics in apiserver.
-func ResetMetrics(c *client.Client) error {
+func ResetMetrics(c clientset.Interface) error {
    Logf("Resetting latency metrics in apiserver...")
-   body, err := c.Delete().AbsPath("/metrics").DoRaw()
+   body, err := c.Core().RESTClient().Delete().AbsPath("/metrics").DoRaw()
    if err != nil {
        return err
    }
@@ -310,8 +310,8 @@ func ResetMetrics(c *client.Client) error {
 }

 // Retrieves metrics information.
-func getMetrics(c *client.Client) (string, error) {
-   body, err := c.Get().AbsPath("/metrics").DoRaw()
+func getMetrics(c clientset.Interface) (string, error) {
+   body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
    if err != nil {
        return "", err
    }
@@ -319,11 +319,11 @@ func getMetrics(c *client.Client) (string, error) {
 }

 // Retrieves scheduler metrics information.
-func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
+func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
    result := SchedulingLatency{}

    // Check if master Node is registered
-   nodes, err := c.Nodes().List(api.ListOptions{})
+   nodes, err := c.Core().Nodes().List(api.ListOptions{})
    ExpectNoError(err)

    var data string
@@ -334,7 +334,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
        }
    }
    if masterRegistered {
-       rawData, err := c.Get().
+       rawData, err := c.Core().RESTClient().Get().
            Prefix("proxy").
            Namespace(api.NamespaceSystem).
            Resource("pods").
@@ -383,7 +383,7 @@ func getSchedulingLatency(c *client.Client) (SchedulingLatency, error) {
 }

 // Verifies (currently just by logging them) the scheduling latencies.
-func VerifySchedulerLatency(c *client.Client) error {
+func VerifySchedulerLatency(c clientset.Interface) error {
    latency, err := getSchedulingLatency(c)
    if err != nil {
        return err
@@ -457,7 +457,7 @@ func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {

 // LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
 // If latencyDataLag is nil then it will be populated from latencyData
-func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c *client.Client) {
+func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c clientset.Interface) {
    if latencyDataLag == nil {
        latencyDataLag = latencyData
    }
@@ -27,7 +27,7 @@ import (
    api "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/apimachinery/registered"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/util/rand"
@@ -372,7 +372,7 @@ func (config *NetworkingTestConfig) createNodePortService(selector map[string]st
 }

 func (config *NetworkingTestConfig) DeleteNodePortService() {
-   err := config.getServiceClient().Delete(config.NodePortService.Name)
+   err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
    Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err)
    time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
 }
@@ -403,7 +403,7 @@ func (config *NetworkingTestConfig) createService(serviceSpec *api.Service) *api
    _, err := config.getServiceClient().Create(serviceSpec)
    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))

-   err = WaitForService(config.f.Client, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
+   err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))

    createdService, err := config.getServiceClient().Get(serviceSpec.Name)
@@ -431,7 +431,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
    config.setupCore(selector)

    By("Getting node addresses")
-   ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
+   ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet))
    nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
    config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
    if len(config.ExternalAddrs) < 2 {
@@ -464,7 +464,7 @@ func (config *NetworkingTestConfig) cleanup() {
    if err == nil {
        for _, ns := range nsList.Items {
            if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
-               nsClient.Delete(ns.Name)
+               nsClient.Delete(ns.Name, nil)
            }
        }
    }
@@ -482,7 +482,7 @@ func shuffleNodes(nodes []api.Node) []api.Node {
 }

 func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
-   ExpectNoError(WaitForAllNodesSchedulable(config.f.Client))
+   ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet))
    nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)

    // To make this test work reasonably fast in large clusters,
@@ -520,12 +520,12 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
    config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
    config.EndpointPods = config.EndpointPods[1:]
    // wait for pod being deleted.
-   err := WaitForPodToDisappear(config.f.Client, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
+   err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
    if err != nil {
        Failf("Failed to delete %s pod: %v", pod.Name, err)
    }
    // wait for endpoint being removed.
-   err = WaitForServiceEndpointsNum(config.f.Client, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
+   err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
    if err != nil {
        Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
    }
@@ -544,10 +544,10 @@ func (config *NetworkingTestConfig) getPodClient() *PodClient {
    return config.podClient
 }

-func (config *NetworkingTestConfig) getServiceClient() client.ServiceInterface {
-   return config.f.Client.Services(config.Namespace)
+func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInterface {
+   return config.f.ClientSet.Core().Services(config.Namespace)
 }

-func (config *NetworkingTestConfig) getNamespacesClient() client.NamespaceInterface {
-   return config.f.Client.Namespaces()
+func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface {
+   return config.f.ClientSet.Core().Namespaces()
 }
@@ -23,7 +23,7 @@ import (
    "time"

    "k8s.io/kubernetes/pkg/api"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/util/wait"
 )
@@ -82,7 +82,7 @@ var NodeUpgrade = func(f *Framework, v string, img string) error {
    // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
    // GKE; the operation shouldn't return until they all are.
    Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
-   if _, err := CheckNodesReady(f.Client, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
+   if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
        return err
    }
    return nil
@@ -139,7 +139,7 @@ func nodeUpgradeGKE(v string, img string) error {
 // CheckNodesReady waits up to nt for expect nodes accessed by c to be ready,
 // returning an error if this doesn't happen in time. It returns the names of
 // nodes it finds.
-func CheckNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, error) {
+func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
    // First, keep getting all of the nodes until we get the number we expect.
    var nodeList *api.NodeList
    var errLast error
@@ -148,7 +148,7 @@ func CheckNodesReady(c *client.Client, nt time.Duration, expect int) ([]string,
        // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
        // knows about all of the nodes. Thus, we retry the list nodes call
        // until we get the expected number of nodes.
-       nodeList, errLast = c.Nodes().List(api.ListOptions{
+       nodeList, errLast = c.Core().Nodes().List(api.ListOptions{
            FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
        if errLast != nil {
            return false, nil
@@ -23,7 +23,7 @@ import (

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/errors"
-   "k8s.io/kubernetes/pkg/client/unversioned"
+   unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/pkg/util/sets"
    "k8s.io/kubernetes/pkg/util/wait"
@@ -43,13 +43,13 @@ var ImageWhiteList sets.String
 func (f *Framework) PodClient() *PodClient {
    return &PodClient{
        f: f,
-       PodInterface: f.Client.Pods(f.Namespace.Name),
+       PodInterface: f.ClientSet.Core().Pods(f.Namespace.Name),
    }
 }

 type PodClient struct {
    f *Framework
-   unversioned.PodInterface
+   unversionedcore.PodInterface
 }

 // Create creates a new pod according to the framework specifications (don't wait for it to start).
@@ -116,7 +116,7 @@ func (c *PodClient) DeleteSync(name string, options *api.DeleteOptions, timeout
    if err != nil && !errors.IsNotFound(err) {
        Failf("Failed to delete pod %q: %v", name, err)
    }
-   Expect(WaitForPodToDisappear(c.f.Client, c.f.Namespace.Name, name, labels.Everything(),
+   Expect(WaitForPodToDisappear(c.f.ClientSet, c.f.Namespace.Name, name, labels.Everything(),
        2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name)
 }

@@ -156,7 +156,7 @@ func (c *PodClient) mungeSpec(pod *api.Pod) {
 // WaitForSuccess waits for pod to success.
 func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
    f := c.f
-   Expect(waitForPodCondition(f.Client, f.Namespace.Name, name, "success or failure", timeout,
+   Expect(waitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
        func(pod *api.Pod) (bool, error) {
            switch pod.Status.Phase {
            case api.PodFailed:
@@ -30,7 +30,7 @@ import (

    . "github.com/onsi/gomega"
    "k8s.io/kubernetes/pkg/api"
-   client "k8s.io/kubernetes/pkg/client/unversioned"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    utilruntime "k8s.io/kubernetes/pkg/util/runtime"
    "k8s.io/kubernetes/pkg/util/system"
 )
@@ -129,7 +129,7 @@ func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]Resour
 }

 type resourceGatherWorker struct {
-   c *client.Client
+   c clientset.Interface
    nodeName string
    wg *sync.WaitGroup
    containerIDToNameMap map[string]string
@@ -204,7 +204,7 @@ func getKubemarkMasterComponentsResourceUsage() ResourceUsagePerContainer {
    return result
 }

-func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c *client.Client) {
+func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c clientset.Interface) {
    if len(g.workers) == 0 {
        return
    }
@@ -218,7 +218,7 @@ func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c *clie
 }

 type containerResourceGatherer struct {
-   client *client.Client
+   client clientset.Interface
    stopCh chan struct{}
    workers []resourceGatherWorker
    workerWg sync.WaitGroup
@@ -232,7 +232,7 @@ type ResourceGathererOptions struct {
    masterOnly bool
 }

-func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions) (*containerResourceGatherer, error) {
+func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions) (*containerResourceGatherer, error) {
    g := containerResourceGatherer{
        client: c,
        stopCh: make(chan struct{}),
@@ -250,7 +250,7 @@ func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions)
            finished: false,
        })
    } else {
-       pods, err := c.Pods("kube-system").List(api.ListOptions{})
+       pods, err := c.Core().Pods("kube-system").List(api.ListOptions{})
        if err != nil {
            Logf("Error while listing Pods: %v", err)
            return nil, err
@@ -262,7 +262,7 @@ func NewResourceUsageGatherer(c *client.Client, options ResourceGathererOptions)
            g.containerIDs = append(g.containerIDs, containerID)
        }
    }
-   nodeList, err := c.Nodes().List(api.ListOptions{})
+   nodeList, err := c.Core().Nodes().List(api.ListOptions{})
    if err != nil {
        Logf("Error while listing Nodes: %v", err)
        return nil, err
File diff suppressed because it is too large
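For reference, a hypothetical test-side view of the framework after this change; the Ginkgo wiring below is illustrative and not taken from this PR, but the surface it exercises (NewFramework taking an internalclientset.Interface, f.ClientSet, f.Namespace) is the one shown in the diff above.

var _ = Describe("clientset usage example", func() {
	// Passing nil lets BeforeEach build the clientset from the loaded config,
	// per the `if f.ClientSet == nil` branch in the diff.
	f := NewFramework("example", FrameworkOptions{}, nil)

	It("lists pods in the test namespace through the internal clientset", func() {
		pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		Logf("found %d pods in namespace %s", len(pods.Items), f.Namespace.Name)
	})
})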