Merge pull request #10487 from satnam6502/sys-namespace
Move cluster level services to the kube-system namespace
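Every e2e helper that previously looked up a cluster add-on (DNS, the Elasticsearch logging service, the Heapster/InfluxDB monitoring stack) in the default namespace now looks it up in kube-system instead. For orientation, the two constants the diff switches between correspond to the following namespace names; this is an illustrative sketch only, since their declaration site is not part of this change:

	// Illustrative only; the actual declarations are not shown in this diff.
	const (
		NamespaceDefault string = "default"     // namespace used when none is specified
		NamespaceSystem  string = "kube-system" // namespace for cluster-level services
	)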
@@ -188,20 +188,20 @@ var _ = Describe("DNS", func() {
 		// TODO: support DNS on vagrant #3580
 		SkipIfProviderIs("vagrant")
 
-		podClient := f.Client.Pods(api.NamespaceDefault)
+		systemClient := f.Client.Pods(api.NamespaceSystem)
 
 		By("Waiting for DNS Service to be Running")
-		dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything())
+		dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything())
 		if err != nil {
 			Failf("Failed to list all dns service pods")
 		}
 		if len(dnsPods.Items) != 1 {
 			Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String())
 		}
-		expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name))
+		expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))
 
 		// All the names we need to be able to resolve.
 		// TODO: Spin up a separate test service and test that dns works for that service.
+		// TODO: Should these be changed to kubernetes.kube-system etc. ?
 		namesToResolve := []string{
 			"kubernetes.default",
 			"kubernetes.default.svc",
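Besides the namespace constant, note the helper swap above: waitForPodRunning implicitly targeted the default namespace, so the move requires a namespace-aware variant, waitForPodRunningInNamespace. Only the call sites appear in this diff; a minimal sketch of what such a helper could look like, with the polling interval, timeout, and use of the wait utility assumed rather than taken from this PR:

	// Hypothetical sketch; the real helper lives in the shared e2e utilities.
	func waitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
		return wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
			pod, err := c.Pods(namespace).Get(podName)
			if err != nil {
				return false, err
			}
			return pod.Status.Phase == api.PodRunning, nil
		})
	}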
@@ -227,17 +227,17 @@ var _ = Describe("DNS", func() {
 		// TODO: support DNS on vagrant #3580
 		SkipIfProviderIs("vagrant")
 
-		podClient := f.Client.Pods(api.NamespaceDefault)
+		systemClient := f.Client.Pods(api.NamespaceSystem)
 
 		By("Waiting for DNS Service to be Running")
-		dnsPods, err := podClient.List(dnsServiceLableSelector, fields.Everything())
+		dnsPods, err := systemClient.List(dnsServiceLableSelector, fields.Everything())
 		if err != nil {
 			Failf("Failed to list all dns service pods")
 		}
 		if len(dnsPods.Items) != 1 {
 			Failf("Unexpected number of pods (%d) matches the label selector %v", len(dnsPods.Items), dnsServiceLableSelector.String())
 		}
-		expectNoError(waitForPodRunning(f.Client, dnsPods.Items[0].Name))
+		expectNoError(waitForPodRunningInNamespace(f.Client, dnsPods.Items[0].Name, api.NamespaceSystem))
 
 		// Create a test headless service.
 		By("Creating a test headless service")
@@ -126,7 +126,7 @@ func TestE2E(t *testing.T) {
 	// cluster infrastructure pods that are being pulled or started can block
 	// test pods from running, and tests that ensure all pods are running and
 	// ready will fail).
-	if err := waitForPodsRunningReady(api.NamespaceDefault, testContext.MinStartupPods, podStartupTimeout); err != nil {
+	if err := waitForPodsRunningReady(api.NamespaceSystem, testContext.MinStartupPods, podStartupTimeout); err != nil {
 		t.Errorf("Error waiting for all pods to be running and ready: %v", err)
 		return
 	}
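This hunk moves the suite's startup gate: before any test runs, the framework now waits for the pods in kube-system, rather than in default, to be both running and ready, because add-ons that are still pulling images or starting up would otherwise fail tests that assert on overall cluster health. A rough sketch of the per-pod check such a gate performs (the condition handling here is assumed, not taken from this diff):

	// Hypothetical sketch of a "running and ready" check for a single pod.
	func podRunningAndReady(pod api.Pod) bool {
		if pod.Status.Phase != api.PodRunning {
			return false
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == api.PodReady {
				return cond.Status == api.ConditionTrue
			}
		}
		return false
	}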
@@ -70,7 +70,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 
 	// Check for the existence of the Elasticsearch service.
 	By("Checking the Elasticsearch service exists.")
-	s := f.Client.Services(api.NamespaceDefault)
+	s := f.Client.Services(api.NamespaceSystem)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
 	var err error
@@ -85,10 +85,10 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	// Wait for the Elasticsearch pods to enter the running state.
 	By("Checking to make sure the Elasticsearch pods are running")
 	label := labels.SelectorFromSet(labels.Set(map[string]string{esKey: esValue}))
-	pods, err := f.Client.Pods(api.NamespaceDefault).List(label, fields.Everything())
+	pods, err := f.Client.Pods(api.NamespaceSystem).List(label, fields.Everything())
 	Expect(err).NotTo(HaveOccurred())
 	for _, pod := range pods.Items {
-		err = waitForPodRunning(f.Client, pod.Name)
+		err = waitForPodRunningInNamespace(f.Client, pod.Name, api.NamespaceSystem)
 		Expect(err).NotTo(HaveOccurred())
 	}
 
@@ -100,7 +100,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
 		// Query against the root URL for Elasticsearch.
 		body, err := f.Client.Get().
-			Namespace(api.NamespaceDefault).
+			Namespace(api.NamespaceSystem).
 			Prefix("proxy").
 			Resource("services").
 			Name("elasticsearch-logging").
@@ -146,7 +146,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	var body []byte
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
 		body, err = f.Client.Get().
-			Namespace(api.NamespaceDefault).
+			Namespace(api.NamespaceSystem).
 			Prefix("proxy").
 			Resource("services").
 			Name("elasticsearch-logging").
@@ -188,7 +188,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 		return isNodeReadySetAsExpected(&node, true)
 	})
 	if len(nodes.Items) < 2 {
-		Failf("Less than two nodes were found Ready.")
+		Failf("Less than two nodes were found Ready: %d", len(nodes.Items))
 	}
 	Logf("Found %d healthy nodes.", len(nodes.Items))
 
@@ -257,7 +257,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 	for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(10 * time.Second) {
 
 		// Debugging code to report the status of the elasticsearch logging endpoints.
-		esPods, err := f.Client.Pods(api.NamespaceDefault).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
+		esPods, err := f.Client.Pods(api.NamespaceSystem).List(labels.Set{esKey: esValue}.AsSelector(), fields.Everything())
		if err != nil {
			Logf("Attempt to list Elasticsearch nodes encountered a problem -- may retry: %v", err)
			continue
@@ -272,7 +272,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 		// verison of the name. Ask for twice as many log lines as we expect to check for
 		// duplication bugs.
 		body, err = f.Client.Get().
-			Namespace(api.NamespaceDefault).
+			Namespace(api.NamespaceSystem).
 			Prefix("proxy").
 			Resource("services").
 			Name("elasticsearch-logging").
@@ -78,7 +78,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 	// situaiton when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
 	// is running (which would be an error except during a rolling update).
 	for _, rcLabel := range rcLabels {
-		rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
+		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
 		if err != nil {
 			return nil, err
 		}
@@ -87,7 +87,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 				rcLabel, len(rcList.Items))
 		}
 		for _, rc := range rcList.Items {
-			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
+			podList, err := c.Pods(api.NamespaceSystem).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
 			if err != nil {
 				return nil, err
 			}
@@ -100,7 +100,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 }
 
 func expectedServicesExist(c *client.Client) error {
-	serviceList, err := c.Services(api.NamespaceDefault).List(labels.Everything())
+	serviceList, err := c.Services(api.NamespaceSystem).List(labels.Everything())
 	if err != nil {
 		return err
 	}
@@ -205,7 +205,7 @@ func testMonitoringUsingHeapsterInfluxdb(c *client.Client) {
 	if !ok {
 		Failf("failed to get master http client")
 	}
-	proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/default/services/%s:api/", getMasterHost(), influxdbService)
+	proxyUrl := fmt.Sprintf("%s/api/v1/proxy/namespaces/%s/services/%s:api/", getMasterHost(), api.NamespaceSystem, influxdbService)
 	config := &influxdb.ClientConfig{
 		Host: proxyUrl,
 		// TODO(vishh): Infer username and pw from the Pod spec.
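The last hunk stops hard-coding the namespace segment of the InfluxDB proxy URL. As an illustration only, if influxdbService were set to a service named monitoring-influxdb (its actual value is defined elsewhere and only assumed here), the URL built by the new format string would change from

	<master>/api/v1/proxy/namespaces/default/services/monitoring-influxdb:api/

to

	<master>/api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/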