Merge pull request #27019 from girishkalele/nethealth2saltbase
Automatic merge from submit-queue. Add nethealth container to prepull manifest.
This commit is contained in:
		@@ -35,6 +35,18 @@ spec:
 | 
			
		||||
      name: socket
 | 
			
		||||
    - mountPath: /usr/bin/docker
 | 
			
		||||
      name: docker
 | 
			
		||||
  # Add a container that runs a health-check
 | 
			
		||||
  - name: nethealth-check
 | 
			
		||||
    resources:
 | 
			
		||||
      requests:
 | 
			
		||||
        cpu: 100m
 | 
			
		||||
      limits:
 | 
			
		||||
        cpu: 100m
 | 
			
		||||
    image:  gcr.io/google_containers/kube-nethealth-amd64:1.0
 | 
			
		||||
    command:
 | 
			
		||||
    - /bin/sh
 | 
			
		||||
    - -c
 | 
			
		||||
    - "/usr/bin/nethealth || true"
 | 
			
		||||
  volumes:
 | 
			
		||||
  - hostPath:
 | 
			
		||||
      path: /var/run/docker.sock
 | 
			
		||||
@@ -44,3 +56,6 @@ spec:
 | 
			
		||||
    name: docker
 | 
			
		||||
  # This pod is really fire-and-forget.
 | 
			
		||||
  restartPolicy: Never
 | 
			
		||||
  # This pod needs hostNetworking for true VM perf measurement as well as avoiding cbr0 issues
 | 
			
		||||
  hostNetwork: true
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -140,6 +140,12 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 | 
			
		||||
		framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", imagePrePullingTimeout, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Dump the output of the nethealth containers only once per run
 | 
			
		||||
	if framework.TestContext.DumpLogsOnFailure {
 | 
			
		||||
		framework.Logf("Dumping network health container logs from all nodes")
 | 
			
		||||
		framework.LogContainersInPodsWithLabels(c, api.NamespaceSystem, framework.ImagePullerLabels, "nethealth")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
 | 
			
		||||
}, func(data []byte) {
 | 
			
		||||
 
 | 
			
		||||
@@ -276,7 +276,7 @@ func (f *Framework) AfterEach() {
 | 
			
		||||
	if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
 | 
			
		||||
		DumpAllNamespaceInfo(f.Client, f.Namespace.Name)
 | 
			
		||||
		By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
 | 
			
		||||
		LogPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels)
 | 
			
		||||
		LogContainersInPodsWithLabels(f.Client, api.NamespaceSystem, ImagePullerLabels, "image-puller")
 | 
			
		||||
		if f.federated {
 | 
			
		||||
			// Print logs of federation control plane pods (federation-apiserver and federation-controller-manager)
 | 
			
		||||
			LogPodsWithLabels(f.Client, "federation", map[string]string{"app": "federated-cluster"})
 | 
			
		||||
 
 | 
			
		||||
@@ -664,17 +664,20 @@ func RunKubernetesServiceTestContainer(c *client.Client, repoRoot string, ns str
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func kubectlLogPod(c *client.Client, pod api.Pod) {
 | 
			
		||||
func kubectlLogPod(c *client.Client, pod api.Pod, containerNameSubstr string) {
 | 
			
		||||
	for _, container := range pod.Spec.Containers {
 | 
			
		||||
		logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
 | 
			
		||||
		if strings.Contains(container.Name, containerNameSubstr) {
 | 
			
		||||
			// Contains() matches all strings if substr is empty
 | 
			
		||||
			logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
 | 
			
		||||
				logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
 | 
			
		||||
				if err != nil {
 | 
			
		||||
					Logf("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
			By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName))
 | 
			
		||||
			Logf("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
 | 
			
		||||
		}
 | 
			
		||||
		By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName))
 | 
			
		||||
		Logf(logs)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -687,7 +690,7 @@ func LogFailedContainers(c *client.Client, ns string) {
 | 
			
		||||
	Logf("Running kubectl logs on non-ready containers in %v", ns)
 | 
			
		||||
	for _, pod := range podList.Items {
 | 
			
		||||
		if res, err := PodRunningReady(&pod); !res || err != nil {
 | 
			
		||||
			kubectlLogPod(c, pod)
 | 
			
		||||
			kubectlLogPod(c, pod, "")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -700,7 +703,18 @@ func LogPodsWithLabels(c *client.Client, ns string, match map[string]string) {
 | 
			
		||||
	}
 | 
			
		||||
	Logf("Running kubectl logs on pods with labels %v in %v", match, ns)
 | 
			
		||||
	for _, pod := range podList.Items {
 | 
			
		||||
		kubectlLogPod(c, pod)
 | 
			
		||||
		kubectlLogPod(c, pod, "")
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func LogContainersInPodsWithLabels(c *client.Client, ns string, match map[string]string, containerSubstr string) {
 | 
			
		||||
	podList, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)})
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		Logf("Error getting pods in namespace %q: %v", ns, err)
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
	for _, pod := range podList.Items {
 | 
			
		||||
		kubectlLogPod(c, pod, containerSubstr)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user