diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go
index 65be81d3d40..2c649010bd0 100644
--- a/pkg/kubectl/describe.go
+++ b/pkg/kubectl/describe.go
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/apis/experimental"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/expapi"
 	"k8s.io/kubernetes/pkg/fieldpath"
 	"k8s.io/kubernetes/pkg/fields"
 	qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util"
@@ -69,6 +70,7 @@ func describerMap(c *client.Client) map[string]Describer {
 	m := map[string]Describer{
 		"Pod":                   &PodDescriber{c},
 		"ReplicationController": &ReplicationControllerDescriber{c},
+		"Daemon":                &DaemonDescriber{c},
 		"Secret":                &SecretDescriber{c},
 		"Service":               &ServiceDescriber{c},
 		"ServiceAccount":        &ServiceAccountDescriber{c},
@@ -128,6 +130,7 @@ func init() {
 		describePod,
 		describeService,
 		describeReplicationController,
+		describeDaemon,
 		describeNode,
 		describeNamespace,
 	)
@@ -423,6 +426,7 @@ type PodDescriber struct {
 
 func (d *PodDescriber) Describe(namespace, name string) (string, error) {
 	rc := d.ReplicationControllers(namespace)
+	dc := d.Daemons(namespace)
 	pc := d.Pods(namespace)
 
 	pod, err := pc.Get(name)
@@ -453,11 +457,15 @@ func (d *PodDescriber) Describe(namespace, name string) (string, error) {
 	if err != nil {
 		return "", err
 	}
+	daemons, err := getDaemonsForLabels(dc, labels.Set(pod.Labels))
+	if err != nil {
+		return "", err
+	}
 
-	return describePod(pod, rcs, events)
+	return describePod(pod, rcs, daemons, events)
 }
 
-func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.EventList) (string, error) {
+func describePod(pod *api.Pod, rcs []api.ReplicationController, daemons []expapi.Daemon, events *api.EventList) (string, error) {
 	return tabbedString(func(out io.Writer) error {
 		fmt.Fprintf(out, "Name:\t%s\n", pod.Name)
 		fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace)
@@ -477,6 +485,7 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even
 		fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message)
 		fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP)
 		fmt.Fprintf(out, "Replication Controllers:\t%s\n", printReplicationControllersByLabels(rcs))
+		fmt.Fprintf(out, "Daemons:\t%s\n", printDaemonsByLabels(daemons))
 		fmt.Fprintf(out, "Containers:\n")
 		describeContainers(pod, out)
 		if len(pod.Status.Conditions) > 0 {
@@ -823,7 +832,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string) (strin
 		return "", err
 	}
 
-	running, waiting, succeeded, failed, err := getPodStatusForReplicationController(pc, controller)
+	running, waiting, succeeded, failed, err := getPodStatusForController(pc, controller.Spec.Selector)
 	if err != nil {
 		return "", err
 	}
@@ -896,6 +905,52 @@ func describeJob(job *experimental.Job, events *api.EventList) (string, error) {
 	})
 }
 
+// DaemonDescriber generates information about a daemon and the pods it has created.
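+// It mirrors ReplicationControllerDescriber, reporting per-node scheduling counts
+// (desired, currently scheduled, and misscheduled) in place of replica counts.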
+type DaemonDescriber struct {
+	client.Interface
+}
+
+func (d *DaemonDescriber) Describe(namespace, name string) (string, error) {
+	dc := d.Daemons(namespace)
+	pc := d.Pods(namespace)
+
+	daemon, err := dc.Get(name)
+	if err != nil {
+		return "", err
+	}
+
+	running, waiting, succeeded, failed, err := getPodStatusForController(pc, daemon.Spec.Selector)
+	if err != nil {
+		return "", err
+	}
+
+	events, _ := d.Events(namespace).Search(daemon)
+
+	return describeDaemon(daemon, events, running, waiting, succeeded, failed)
+}
+
+func describeDaemon(daemon *expapi.Daemon, events *api.EventList, running, waiting, succeeded, failed int) (string, error) {
+	return tabbedString(func(out io.Writer) error {
+		fmt.Fprintf(out, "Name:\t%s\n", daemon.Name)
+		if daemon.Spec.Template != nil {
+			fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&daemon.Spec.Template.Spec))
+		} else {
+			fmt.Fprintf(out, "Image(s):\t%s\n", "")
+		}
+		fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Selector))
+		if daemon.Spec.Template != nil {
+			fmt.Fprintf(out, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector))
+		}
+		fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(daemon.Labels))
+		fmt.Fprintf(out, "Desired Number of Nodes Scheduled:\t%d\n", daemon.Status.DesiredNumberScheduled)
+		fmt.Fprintf(out, "Current Number of Nodes Scheduled:\t%d\n", daemon.Status.CurrentNumberScheduled)
+		fmt.Fprintf(out, "Number of Nodes Misscheduled:\t%d\n", daemon.Status.NumberMisscheduled)
+		fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
+		if events != nil {
+			DescribeEvents(events, out)
+		}
+		return nil
+	})
+}
+
 // SecretDescriber generates information about a secret
 type SecretDescriber struct {
 	client.Interface
@@ -1347,6 +1402,30 @@ func DescribeEvents(el *api.EventList, w io.Writer) {
 	}
 }
 
+// Get all daemons whose selectors would match a given set of labels.
+// TODO: Move this to pkg/client and ideally implement it server-side (instead
+// of getting all daemons and searching through them manually).
+// TODO: write an interface for controllers and fuse getReplicationControllersForLabels
+// and getDaemonsForLabels.
+func getDaemonsForLabels(c client.DaemonInterface, labelsToMatch labels.Labels) ([]expapi.Daemon, error) {
+	// Get all daemon controllers.
+	// TODO: this needs a namespace scope as argument
+	daemons, err := c.List(labels.Everything())
+	if err != nil {
+		return nil, fmt.Errorf("error getting daemons: %v", err)
+	}
+
+	// Find the ones that match labelsToMatch.
+	var matchingDaemons []expapi.Daemon
+	for _, daemon := range daemons.Items {
+		selector := labels.SelectorFromSet(daemon.Spec.Selector)
+		if selector.Matches(labelsToMatch) {
+			matchingDaemons = append(matchingDaemons, daemon)
+		}
+	}
+	return matchingDaemons, nil
+}
+
 // Get all replication controllers whose selectors would match a given set of
 // labels.
 // TODO Move this to pkg/client and ideally implement it server-side (instead
@@ -1370,6 +1449,20 @@ func getReplicationControllersForLabels(c client.ReplicationControllerInterface,
 	return matchingRCs, nil
 }
 
+func printDaemonsByLabels(matchingDaemons []expapi.Daemon) string {
+	// Format the matching daemons into strings.
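+	// The output mirrors printReplicationControllersByLabels below, substituting
+	// per-node scheduling counts for replica counts.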
+	var daemonStrings []string
+	for _, daemon := range matchingDaemons {
+		daemonStrings = append(daemonStrings, fmt.Sprintf("%s (%d desired, %d nodes scheduled, %d nodes misscheduled)", daemon.Name, daemon.Status.DesiredNumberScheduled, daemon.Status.CurrentNumberScheduled, daemon.Status.NumberMisscheduled))
+	}
+
+	list := strings.Join(daemonStrings, ", ")
+	if list == "" {
+		return "<none>"
+	}
+	return list
+}
+
 func printReplicationControllersByLabels(matchingRCs []api.ReplicationController) string {
 	// Format the matching RC's into strings.
 	var rcStrings []string
@@ -1384,8 +1477,8 @@ func printReplicationControllersByLabels(matchingRCs []api.ReplicationController
 	return list
 }
 
-func getPodStatusForReplicationController(c client.PodInterface, controller *api.ReplicationController) (running, waiting, succeeded, failed int, err error) {
-	rcPods, err := c.List(labels.SelectorFromSet(controller.Spec.Selector), fields.Everything())
+func getPodStatusForController(c client.PodInterface, selector map[string]string) (running, waiting, succeeded, failed int, err error) {
+	rcPods, err := c.List(labels.SelectorFromSet(selector), fields.Everything())
 	if err != nil {
 		return
 	}
diff --git a/pkg/kubectl/kubectl.go b/pkg/kubectl/kubectl.go
index 15c9c0d6cb4..a6fe0f743c4 100644
--- a/pkg/kubectl/kubectl.go
+++ b/pkg/kubectl/kubectl.go
@@ -105,6 +105,7 @@ func expandResourceShortcut(resource string) string {
 		"pvc":   "persistentvolumeclaims",
 		"quota": "resourcequotas",
 		"rc":    "replicationcontrollers",
+		"dm":    "daemons",
 		"svc":   "services",
 	}
 	if expanded, ok := shortForms[resource]; ok {
diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go
index 81f095cd01f..20a0aaba9bc 100644
--- a/pkg/kubectl/resource_printer.go
+++ b/pkg/kubectl/resource_printer.go
@@ -384,6 +384,7 @@ var jobColumns = []string{"JOB", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "SUCCES
 var serviceColumns = []string{"NAME", "CLUSTER_IP", "EXTERNAL_IP", "PORT(S)", "SELECTOR", "AGE"}
 var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
 var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
+var daemonColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "NODE-SELECTOR"}
 var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
 var limitRangeColumns = []string{"NAME", "AGE"}
 var resourceQuotaColumns = []string{"NAME", "AGE"}
@@ -406,6 +407,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() {
 	h.Handler(podTemplateColumns, printPodTemplateList)
 	h.Handler(replicationControllerColumns, printReplicationController)
 	h.Handler(replicationControllerColumns, printReplicationControllerList)
+	h.Handler(daemonColumns, printDaemon)
+	h.Handler(daemonColumns, printDaemonList)
 	h.Handler(jobColumns, printJob)
 	h.Handler(jobColumns, printJobList)
 	h.Handler(serviceColumns, printService)
@@ -810,6 +813,60 @@ func printServiceList(list *api.ServiceList, w io.Writer, withNamespace bool, wi
 	return nil
 }
 
+func printDaemon(daemon *expapi.Daemon, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
+	name := daemon.Name
+	namespace := daemon.Namespace
+
+	containers := daemon.Spec.Template.Spec.Containers
+	var firstContainer api.Container
+	if len(containers) > 0 {
+		firstContainer, containers = containers[0], containers[1:]
+	}
+
+	if withNamespace {
+		if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
+			return err
+		}
+	}
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+		name,
+		firstContainer.Name,
+		firstContainer.Image,
+		labels.FormatLabels(daemon.Spec.Selector),
+		labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector),
+	); err != nil {
+		return err
+	}
+	if _, err := fmt.Fprint(w, appendLabels(daemon.Labels, columnLabels)); err != nil {
+		return err
+	}
+
+	// Lay out all the other containers on separate lines.
+	extraLinePrefix := "\t"
+	if withNamespace {
+		extraLinePrefix = "\t\t"
+	}
+	for _, container := range containers {
+		_, err := fmt.Fprintf(w, "%s%s\t%s\t%s\t%s", extraLinePrefix, container.Name, container.Image, "", "")
+		if err != nil {
+			return err
+		}
+		if _, err := fmt.Fprint(w, appendLabelTabs(columnLabels)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func printDaemonList(list *expapi.DaemonList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
+	for _, daemon := range list.Items {
+		if err := printDaemon(&daemon, w, withNamespace, wide, showAll, columnLabels); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 func printEndpoints(endpoints *api.Endpoints, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error {
 	name := endpoints.Name
 	namespace := endpoints.Namespace
diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go
index bca8ff277d9..490e98ccdb1 100644
--- a/pkg/kubectl/stop.go
+++ b/pkg/kubectl/stop.go
@@ -21,10 +21,13 @@ import (
 	"strings"
 	"time"
 
+	fuzz "github.com/google/gofuzz"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/meta"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/util/wait"
 )
 
 const (
@@ -56,6 +59,8 @@ func ReaperFor(kind string, c client.Interface) (Reaper, error) {
 	switch kind {
 	case "ReplicationController":
 		return &ReplicationControllerReaper{c, Interval, Timeout}, nil
+	case "Daemon":
+		return &DaemonReaper{c, Interval, Timeout}, nil
 	case "Pod":
 		return &PodReaper{c}, nil
 	case "Service":
@@ -72,6 +77,10 @@ type ReplicationControllerReaper struct {
 	client.Interface
 	pollInterval, timeout time.Duration
 }
+type DaemonReaper struct {
+	client.Interface
+	pollInterval, timeout time.Duration
+}
 type PodReaper struct {
 	client.Interface
 }
@@ -167,6 +176,62 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout
 	return fmt.Sprintf("%s stopped", name), nil
 }
 
+func (reaper *DaemonReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) {
+	// Retrieve the daemon we want to stop.
+	daemonClient := reaper.Daemons(namespace)
+	daemon, err := daemonClient.Get(name)
+	if err != nil {
+		return "", err
+	}
+
+	// Update the daemon to select for a non-existent NodeName.
+	// The daemon manager will then kill all the pods corresponding to this daemon.
+	nodes, err := reaper.Nodes().List(labels.Everything(), fields.Everything())
+	if err != nil {
+		return "", err
+	}
+	fuzzer := fuzz.New()
+	var nameExists bool
+	numRetries := 1
+	for try := 0; try <= numRetries; try++ {
+		var nodeName string
+		fuzzer.Fuzz(&nodeName)
+		nameExists = false
+		for _, node := range nodes.Items {
+			nameExists = nameExists || node.Name == nodeName
+		}
+		if !nameExists {
+			daemon.Spec.Template.Spec.NodeName = nodeName
+			break
+		}
+	}
+	if nameExists {
+		// The probability of reaching here is extremely low; it most likely indicates a programming bug or a library error.
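+		// Give up rather than set NodeName to the name of a real node, which could
+		// leave the daemon still running pods on that node instead of draining.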
+ return "", fmt.Errorf("Failed to stop node.") + } + daemonClient.Update(daemon) + + // Wait for the daemon manager to kill all the daemon's daemon pods. + daemonPodsKilled := func() (bool, error) { + updatedDc, err := daemonClient.Get(name) + if err != nil { + // We don't return an error, because returning an error will abort wait.Poll, but + // if there's an error, we want to try getting the daemon again. + return false, nil + } + return updatedDc.Status.CurrentNumberScheduled+updatedDc.Status.NumberMisscheduled == 0, nil + } + if err := wait.Poll(reaper.pollInterval, reaper.timeout, daemonPodsKilled); err != nil { + return "", err + } + + // Finally, kill the daemon. + if err := daemonClient.Delete(name); err != nil { + return "", err + } + return fmt.Sprintf("%s stopped", name), nil +} + func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) (string, error) { pods := reaper.Pods(namespace) _, err := pods.Get(name)