kubernetes/pkg/registry/pod/strategy.go
Russ Cox 58629a28e4 pkg/registry/pod: avoid allocation in common pod search
PodToSelectableFields creates a map of field attributes
for a particular pod filter query to use. If the result
of the query does not depend on the fields at all, avoid
creating the map.

This is the source of about half the allocated memory
(by byte volume) during the kubemark benchmark, and it
is in turn the main driver of CPU usage during the benchmark,
because of the many background pod watches going on,
as well as the occasional list pods.
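
The change, in essence (the complete code is in MatchPod below): build
the pod field map only when the field selector is non-empty and the
label selector has already matched:

	podLabels := labels.Set(pod.ObjectMeta.Labels)
	var podFields fields.Set
	if !field.Empty() && label.Matches(podLabels) {
		podFields = PodToSelectableFields(pod)
	}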

These benchmarks for 1000-node kubemark show the difference
from my previous CL (caching timers) to this CL:

name                               old ms/op   new ms/op   delta
LIST_nodes_p50                       124 ±13%    121 ± 9%     ~     (p=0.136 n=29+27)
LIST_nodes_p90                       278 ±15%    266 ±12%   -4.26%  (p=0.031 n=29+27)
LIST_nodes_p99                       405 ±19%    400 ±14%     ~     (p=0.864 n=28+28)
LIST_pods_p50                       65.3 ±13%   56.3 ± 9%  -13.75%  (p=0.000 n=29+28)
LIST_pods_p90                        115 ±12%     93 ± 8%  -18.75%  (p=0.000 n=27+28)
LIST_pods_p99                        226 ±21%    202 ±14%  -10.52%  (p=0.000 n=28+28)
LIST_replicationcontrollers_p50     26.6 ±43%   26.2 ±54%     ~     (p=0.487 n=29+29)
LIST_replicationcontrollers_p90     68.7 ±63%   68.6 ±59%     ~     (p=0.931 n=29+28)
LIST_replicationcontrollers_p99      173 ±41%    177 ±49%     ~     (p=0.618 n=28+29)
PUT_replicationcontrollers_p50      5.83 ±36%   5.94 ±32%     ~     (p=0.818 n=28+29)
PUT_replicationcontrollers_p90      15.9 ± 6%   15.5 ± 6%   -2.23%  (p=0.019 n=28+29)
PUT_replicationcontrollers_p99      56.7 ±41%   39.5 ±55%  -30.29%  (p=0.000 n=28+29)
DELETE_pods_p50                     24.3 ±17%   24.3 ±13%     ~     (p=0.855 n=28+29)
DELETE_pods_p90                     30.6 ± 0%   30.7 ± 1%     ~     (p=0.140 n=28+29)
DELETE_pods_p99                     56.3 ±27%   54.2 ±23%     ~     (p=0.188 n=28+27)
PUT_nodes_p50                       14.9 ± 1%   14.8 ± 2%     ~     (p=0.781 n=28+27)
PUT_nodes_p90                       16.4 ± 2%   16.3 ± 2%     ~     (p=0.321 n=28+28)
PUT_nodes_p99                       44.6 ±42%   41.3 ±35%     ~     (p=0.361 n=29+28)
POST_replicationcontrollers_p50     6.33 ±23%   6.34 ±20%     ~     (p=0.993 n=28+28)
POST_replicationcontrollers_p90     15.2 ± 6%   15.0 ± 5%     ~     (p=0.106 n=28+29)
POST_replicationcontrollers_p99     53.4 ±52%   32.9 ±46%  -38.41%  (p=0.000 n=27+27)
POST_pods_p50                       9.33 ±13%   8.95 ±16%     ~     (p=0.069 n=29+29)
POST_pods_p90                       16.3 ± 4%   16.1 ± 4%   -1.43%  (p=0.044 n=29+29)
POST_pods_p99                       28.4 ±23%   26.4 ±12%   -7.05%  (p=0.004 n=29+28)
DELETE_replicationcontrollers_p50   2.50 ±13%   2.50 ±13%     ~     (p=0.649 n=29+28)
DELETE_replicationcontrollers_p90   11.7 ±10%   11.8 ±13%     ~     (p=0.863 n=28+28)
DELETE_replicationcontrollers_p99   19.0 ±22%   19.1 ±21%     ~     (p=0.818 n=28+29)
PUT_pods_p50                        10.3 ± 5%   10.2 ± 5%     ~     (p=0.235 n=28+27)
PUT_pods_p90                        16.0 ± 1%   16.0 ± 1%     ~     (p=0.380 n=29+28)
PUT_pods_p99                        21.6 ±14%   20.9 ± 9%   -3.15%  (p=0.010 n=28+27)
POST_bindings_p50                   8.98 ±17%   8.92 ±15%     ~     (p=0.666 n=29+28)
POST_bindings_p90                   16.5 ± 2%   16.5 ± 3%     ~     (p=0.840 n=26+29)
POST_bindings_p99                   21.4 ± 5%   21.1 ± 4%   -1.21%  (p=0.049 n=27+28)
GET_nodes_p90                       1.18 ±19%   1.14 ±24%     ~     (p=0.137 n=29+29)
GET_nodes_p99                       8.29 ±40%   7.50 ±46%     ~     (p=0.106 n=28+29)
GET_replicationcontrollers_p90      1.03 ±21%   1.01 ±27%     ~     (p=0.489 n=29+29)
GET_replicationcontrollers_p99     10.0 ±123%  10.0 ±145%     ~     (p=0.794 n=28+29)
GET_pods_p90                        1.08 ±21%   1.02 ±19%     ~     (p=0.083 n=29+28)
GET_pods_p99                        2.81 ±39%   2.45 ±38%  -12.78%  (p=0.021 n=28+25)

Overall the two CLs combined have this effect:

name                               old ms/op  new ms/op   delta
LIST_nodes_p50                      127 ±16%    121 ± 9%   -4.58%  (p=0.000 n=29+27)
LIST_nodes_p90                      326 ±12%    266 ±12%  -18.48%  (p=0.000 n=29+27)
LIST_nodes_p99                      453 ±11%    400 ±14%  -11.79%  (p=0.000 n=29+28)
LIST_replicationcontrollers_p50    29.4 ±49%   26.2 ±54%     ~     (p=0.085 n=30+29)
LIST_replicationcontrollers_p90    83.0 ±78%   68.6 ±59%  -17.33%  (p=0.013 n=30+28)
LIST_replicationcontrollers_p99     216 ±43%    177 ±49%  -17.68%  (p=0.000 n=29+29)
DELETE_pods_p50                    24.5 ±14%   24.3 ±13%     ~     (p=0.562 n=30+29)
DELETE_pods_p90                    30.7 ± 1%   30.7 ± 1%   -0.30%  (p=0.011 n=29+29)
DELETE_pods_p99                    77.2 ±34%   54.2 ±23%  -29.76%  (p=0.000 n=30+27)
PUT_replicationcontrollers_p50     5.86 ±26%   5.94 ±32%     ~     (p=0.734 n=29+29)
PUT_replicationcontrollers_p90     15.8 ± 7%   15.5 ± 6%   -2.06%  (p=0.010 n=29+29)
PUT_replicationcontrollers_p99     57.8 ±35%   39.5 ±55%  -31.60%  (p=0.000 n=29+29)
PUT_nodes_p50                      14.9 ± 2%   14.8 ± 2%   -0.68%  (p=0.012 n=30+27)
PUT_nodes_p90                      16.5 ± 1%   16.3 ± 2%   -0.90%  (p=0.000 n=27+28)
PUT_nodes_p99                      57.9 ±47%   41.3 ±35%  -28.61%  (p=0.000 n=30+28)
POST_replicationcontrollers_p50    6.35 ±29%   6.34 ±20%     ~     (p=0.944 n=30+28)
POST_replicationcontrollers_p90    15.4 ± 5%   15.0 ± 5%   -2.18%  (p=0.001 n=29+29)
POST_replicationcontrollers_p99    52.2 ±71%   32.9 ±46%  -36.99%  (p=0.000 n=29+27)
POST_pods_p50                      8.99 ±13%   8.95 ±16%     ~     (p=0.903 n=30+29)
POST_pods_p90                      16.2 ± 4%   16.1 ± 4%     ~     (p=0.287 n=29+29)
POST_pods_p99                      30.9 ±21%   26.4 ±12%  -14.73%  (p=0.000 n=28+28)
POST_bindings_p50                  9.34 ±12%   8.92 ±15%   -4.54%  (p=0.013 n=30+28)
POST_bindings_p90                  16.6 ± 1%   16.5 ± 3%   -0.73%  (p=0.017 n=28+29)
POST_bindings_p99                  23.5 ± 9%   21.1 ± 4%  -10.09%  (p=0.000 n=27+28)
PUT_pods_p50                       10.8 ±11%   10.2 ± 5%   -5.47%  (p=0.000 n=30+27)
PUT_pods_p90                       16.1 ± 1%   16.0 ± 1%   -0.64%  (p=0.000 n=29+28)
PUT_pods_p99                       23.4 ± 9%   20.9 ± 9%  -10.93%  (p=0.000 n=28+27)
DELETE_replicationcontrollers_p50  2.42 ±16%   2.50 ±13%     ~     (p=0.054 n=29+28)
DELETE_replicationcontrollers_p90  11.5 ±12%   11.8 ±13%     ~     (p=0.141 n=30+28)
DELETE_replicationcontrollers_p99  19.5 ±21%   19.1 ±21%     ~     (p=0.397 n=29+29)
GET_nodes_p50                      0.77 ±10%   0.76 ±10%     ~     (p=0.317 n=28+28)
GET_nodes_p90                      1.20 ±16%   1.14 ±24%   -4.66%  (p=0.036 n=28+29)
GET_nodes_p99                      11.4 ±48%    7.5 ±46%  -34.28%  (p=0.000 n=28+29)
GET_replicationcontrollers_p50     0.74 ±17%   0.73 ±17%     ~     (p=0.222 n=30+28)
GET_replicationcontrollers_p90     1.04 ±25%   1.01 ±27%     ~     (p=0.231 n=30+29)
GET_replicationcontrollers_p99     12.1 ±81%  10.0 ±145%     ~     (p=0.063 n=28+29)
GET_pods_p50                       0.78 ±12%   0.77 ±10%     ~     (p=0.178 n=30+28)
GET_pods_p90                       1.06 ±19%   1.02 ±19%     ~     (p=0.120 n=29+28)
GET_pods_p99                       3.92 ±43%   2.45 ±38%  -37.55%  (p=0.000 n=27+25)
LIST_services_p50                  0.20 ±13%   0.20 ±16%     ~     (p=0.854 n=28+29)
LIST_services_p90                  0.28 ±15%   0.27 ±14%     ~     (p=0.219 n=29+28)
LIST_services_p99                  0.49 ±20%   0.47 ±24%     ~     (p=0.140 n=29+29)
LIST_endpoints_p50                 0.19 ±14%   0.19 ±15%     ~     (p=0.709 n=29+29)
LIST_endpoints_p90                 0.26 ±16%   0.26 ±13%     ~     (p=0.274 n=29+28)
LIST_endpoints_p99                 0.46 ±24%   0.44 ±21%     ~     (p=0.111 n=29+29)
LIST_horizontalpodautoscalers_p50  0.16 ±15%   0.15 ±13%     ~     (p=0.253 n=30+27)
LIST_horizontalpodautoscalers_p90  0.22 ±24%   0.21 ±16%     ~     (p=0.152 n=30+28)
LIST_horizontalpodautoscalers_p99  0.31 ±33%   0.31 ±38%     ~     (p=0.817 n=28+29)
LIST_daemonsets_p50                0.16 ±20%   0.15 ±11%     ~     (p=0.135 n=30+27)
LIST_daemonsets_p90                0.22 ±18%   0.21 ±25%     ~     (p=0.135 n=29+28)
LIST_daemonsets_p99                0.29 ±28%   0.29 ±32%     ~     (p=0.606 n=28+28)
LIST_jobs_p50                      0.16 ±16%   0.15 ±12%     ~     (p=0.375 n=29+28)
LIST_jobs_p90                      0.22 ±18%   0.21 ±16%     ~     (p=0.090 n=29+26)
LIST_jobs_p99                      0.31 ±28%   0.28 ±35%  -10.29%  (p=0.005 n=29+27)
LIST_deployments_p50               0.15 ±16%   0.15 ±13%     ~     (p=0.565 n=29+28)
LIST_deployments_p90               0.22 ±22%   0.21 ±19%     ~     (p=0.107 n=30+28)
LIST_deployments_p99               0.31 ±27%   0.29 ±34%     ~     (p=0.068 n=29+28)
LIST_namespaces_p50                0.21 ±25%   0.21 ±26%     ~     (p=0.768 n=29+27)
LIST_namespaces_p90                0.28 ±29%   0.26 ±25%     ~     (p=0.101 n=30+28)
LIST_namespaces_p99                0.30 ±48%   0.29 ±42%     ~     (p=0.339 n=30+29)
LIST_replicasets_p50               0.15 ±18%   0.15 ±16%     ~     (p=0.612 n=30+28)
LIST_replicasets_p90               0.22 ±19%   0.21 ±18%   -5.13%  (p=0.011 n=28+27)
LIST_replicasets_p99               0.31 ±39%   0.28 ±29%     ~     (p=0.066 n=29+28)
LIST_persistentvolumes_p50         0.16 ±23%   0.15 ±21%     ~     (p=0.124 n=30+29)
LIST_persistentvolumes_p90         0.21 ±23%   0.20 ±23%     ~     (p=0.092 n=30+25)
LIST_persistentvolumes_p99         0.21 ±24%   0.20 ±23%     ~     (p=0.053 n=30+25)
LIST_resourcequotas_p50            0.16 ±12%   0.16 ±13%     ~     (p=0.175 n=27+28)
LIST_resourcequotas_p90            0.20 ±22%   0.20 ±24%     ~     (p=0.388 n=30+28)
LIST_resourcequotas_p99            0.22 ±24%   0.22 ±23%     ~     (p=0.575 n=30+28)
LIST_persistentvolumeclaims_p50    0.15 ±21%   0.15 ±29%     ~     (p=0.079 n=30+28)
LIST_persistentvolumeclaims_p90    0.19 ±26%   0.18 ±34%     ~     (p=0.446 n=29+29)
LIST_persistentvolumeclaims_p99    0.19 ±26%   0.18 ±34%     ~     (p=0.446 n=29+29)
LIST_pods_p50                      68.0 ±16%   56.3 ± 9%  -17.19%  (p=0.000 n=29+28)
LIST_pods_p90                       119 ±19%     93 ± 8%  -21.88%  (p=0.000 n=28+28)
LIST_pods_p99                       230 ±18%    202 ±14%  -12.13%  (p=0.000 n=27+28)
2016-04-21 15:53:47 -04:00

/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"fmt"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/validation"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/kubelet/client"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/registry/generic"
	"k8s.io/kubernetes/pkg/runtime"
	utilnet "k8s.io/kubernetes/pkg/util/net"
	"k8s.io/kubernetes/pkg/util/validation/field"
)

// podStrategy implements behavior for Pods
type podStrategy struct {
	runtime.ObjectTyper
	api.NameGenerator
}

// Strategy is the default logic that applies when creating and updating Pod
// objects via the REST API.
var Strategy = podStrategy{api.Scheme, api.SimpleNameGenerator}

// NamespaceScoped is true for pods.
func (podStrategy) NamespaceScoped() bool {
	return true
}

// PrepareForCreate clears fields that are not allowed to be set by end users on creation.
func (podStrategy) PrepareForCreate(obj runtime.Object) {
	pod := obj.(*api.Pod)
	pod.Status = api.PodStatus{
		Phase: api.PodPending,
	}
}

// PrepareForUpdate clears fields that are not allowed to be set by end users on update.
func (podStrategy) PrepareForUpdate(obj, old runtime.Object) {
	newPod := obj.(*api.Pod)
	oldPod := old.(*api.Pod)
	newPod.Status = oldPod.Status
}

// Validate validates a new pod.
func (podStrategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList {
	pod := obj.(*api.Pod)
	return validation.ValidatePod(pod)
}

// Canonicalize normalizes the object after validation.
func (podStrategy) Canonicalize(obj runtime.Object) {
}

// AllowCreateOnUpdate is false for pods.
func (podStrategy) AllowCreateOnUpdate() bool {
	return false
}

// ValidateUpdate is the default update validation for an end user.
func (podStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList {
	errorList := validation.ValidatePod(obj.(*api.Pod))
	return append(errorList, validation.ValidatePodUpdate(obj.(*api.Pod), old.(*api.Pod))...)
}

// AllowUnconditionalUpdate allows pods to be overwritten
func (podStrategy) AllowUnconditionalUpdate() bool {
	return true
}

// CheckGracefulDelete allows a pod to be gracefully deleted. It updates the DeleteOptions to
// reflect the desired grace value.
func (podStrategy) CheckGracefulDelete(obj runtime.Object, options *api.DeleteOptions) bool {
	if options == nil {
		return false
	}
	pod := obj.(*api.Pod)
	period := int64(0)
	// user has specified a value
	if options.GracePeriodSeconds != nil {
		period = *options.GracePeriodSeconds
	} else {
		// use the default value if set, or delete the pod immediately (0)
		if pod.Spec.TerminationGracePeriodSeconds != nil {
			period = *pod.Spec.TerminationGracePeriodSeconds
		}
	}
	// if the pod is not scheduled, delete immediately
	if len(pod.Spec.NodeName) == 0 {
		period = 0
	}
	// if the pod is already terminated, delete immediately
	if pod.Status.Phase == api.PodFailed || pod.Status.Phase == api.PodSucceeded {
		period = 0
	}
	// ensure the options and the pod are in sync
	options.GracePeriodSeconds = &period
	return true
}
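
// Illustration of the precedence implemented above (values hypothetical, not
// part of the original file): an explicit grace period in the delete options
// wins, otherwise the pod's own default applies, otherwise zero; an unscheduled
// or already-terminated pod is always deleted immediately.
//
//	options.GracePeriodSeconds unset, spec.terminationGracePeriodSeconds = 30  -> period 30
//	options.GracePeriodSeconds = 5,   spec.terminationGracePeriodSeconds = 30  -> period 5
//	spec.nodeName == "" (never scheduled)                                      -> period 0
//	status.phase is PodSucceeded or PodFailed                                  -> period 0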

type podStrategyWithoutGraceful struct {
	podStrategy
}

// CheckGracefulDelete prohibits graceful deletion.
func (podStrategyWithoutGraceful) CheckGracefulDelete(obj runtime.Object, options *api.DeleteOptions) bool {
	return false
}

// StrategyWithoutGraceful implements the legacy instant delete behavior.
var StrategyWithoutGraceful = podStrategyWithoutGraceful{Strategy}

type podStatusStrategy struct {
	podStrategy
}

// StatusStrategy is the logic used when updating only the status of a pod.
var StatusStrategy = podStatusStrategy{Strategy}

// PrepareForUpdate limits a status update to the status: the pod spec is
// carried over from the existing object and the incoming deletion timestamp is cleared.
func (podStatusStrategy) PrepareForUpdate(obj, old runtime.Object) {
	newPod := obj.(*api.Pod)
	oldPod := old.(*api.Pod)
	newPod.Spec = oldPod.Spec
	newPod.DeletionTimestamp = nil
}

// ValidateUpdate validates an update to the pod status.
func (podStatusStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList {
	// TODO: merge valid fields after update
	return validation.ValidatePodStatusUpdate(obj.(*api.Pod), old.(*api.Pod))
}

// MatchPod returns a generic matcher for a given label and field selector.
func MatchPod(label labels.Selector, field fields.Selector) generic.Matcher {
	return &generic.SelectionPredicate{
		Label: label,
		Field: field,
		GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) {
			pod, ok := obj.(*api.Pod)
			if !ok {
				return nil, nil, fmt.Errorf("not a pod")
			}
			// podLabels is already sitting there ready to be used.
			// podFields is not available directly and requires allocation of a map.
			// Only bother if the fields might be useful in determining the match.
			// One common case is for a replication controller to set up a watch
			// based on labels alone; in that case we can avoid allocating the field map.
			// This is especially important in the apiserver.
			podLabels := labels.Set(pod.ObjectMeta.Labels)
			var podFields fields.Set
			if !field.Empty() && label.Matches(podLabels) {
				podFields = PodToSelectableFields(pod)
			}
			return podLabels, podFields, nil
		},
	}
}
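
// Example (illustrative, not part of the original file): a controller that
// watches pods by label alone builds its matcher with an empty field selector,
//
//	pred := MatchPod(labels.SelectorFromSet(labels.Set{"app": "web"}), fields.Everything())
//
// so when the registry evaluates pred against each pod, field.Empty() is true
// and GetAttrs returns a nil fields.Set without ever calling
// PodToSelectableFields; that skipped allocation is what the commit message
// above measures.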

// PodToSelectableFields returns a field set that represents the object
// TODO: fields are not labels, and the validation rules for them do not apply.
func PodToSelectableFields(pod *api.Pod) fields.Set {
	objectMetaFieldsSet := generic.ObjectMetaFieldsSet(pod.ObjectMeta, true)
	podSpecificFieldsSet := fields.Set{
		"spec.nodeName":      pod.Spec.NodeName,
		"spec.restartPolicy": string(pod.Spec.RestartPolicy),
		"status.phase":       string(pod.Status.Phase),
	}
	return generic.MergeFieldsSets(objectMetaFieldsSet, podSpecificFieldsSet)
}
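
// For a scheduled, running pod the merged set contains entries like the
// following (values hypothetical):
//
//	metadata.name      = "mypod"
//	metadata.namespace = "default"
//	spec.nodeName      = "node-1"
//	spec.restartPolicy = "Always"
//	status.phase       = "Running"
//
// so a field selector such as fields.OneTermEqualSelector("spec.nodeName", "node-1")
// can be matched against it.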

// ResourceGetter is an interface for retrieving resources by ResourceLocation.
type ResourceGetter interface {
	Get(api.Context, string) (runtime.Object, error)
}

// getPod fetches the named pod from the getter and verifies the result.
func getPod(getter ResourceGetter, ctx api.Context, name string) (*api.Pod, error) {
	obj, err := getter.Get(ctx, name)
	if err != nil {
		return nil, err
	}
	pod, ok := obj.(*api.Pod)
	if !ok || pod == nil {
		return nil, fmt.Errorf("unexpected object type: %#v", obj)
	}
	return pod, nil
}

// ResourceLocation returns a URL to which one can send traffic for the specified pod.
func ResourceLocation(getter ResourceGetter, rt http.RoundTripper, ctx api.Context, id string) (*url.URL, http.RoundTripper, error) {
	// Allow ID as "podname" or "podname:port" or "scheme:podname:port".
	// If port is not specified, try to use the first defined port on the pod.
	scheme, name, port, valid := utilnet.SplitSchemeNamePort(id)
	if !valid {
		return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid pod request %q", id))
	}
	// TODO: if port is not a number but a "(container)/(portname)", do a name lookup.
	pod, err := getPod(getter, ctx, name)
	if err != nil {
		return nil, nil, err
	}

	// Try to figure out a port.
	if port == "" {
		for i := range pod.Spec.Containers {
			if len(pod.Spec.Containers[i].Ports) > 0 {
				port = fmt.Sprintf("%d", pod.Spec.Containers[i].Ports[0].ContainerPort)
				break
			}
		}
	}

	loc := &url.URL{
		Scheme: scheme,
	}
	if port == "" {
		loc.Host = pod.Status.PodIP
	} else {
		loc.Host = net.JoinHostPort(pod.Status.PodIP, port)
	}
	return loc, rt, nil
}
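
// Example (hypothetical values): for id "https:mypod:8443" and a pod whose
// status.podIP is 10.0.0.5, the returned location is
//
//	&url.URL{Scheme: "https", Host: "10.0.0.5:8443"}
//
// For a bare "mypod" with no port, the first container port declared on the
// pod (if any) is used and the Scheme field is left empty.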

// getContainerNames returns a formatted string containing the container names
func getContainerNames(pod *api.Pod) string {
	names := []string{}
	for _, c := range pod.Spec.Containers {
		names = append(names, c.Name)
	}
	return strings.Join(names, " ")
}

// LogLocation returns the log URL for a pod container. If opts.Container is blank
// and only one container is present in the pod, that container is used.
func LogLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
	opts *api.PodLogOptions,
) (*url.URL, http.RoundTripper, error) {
	pod, err := getPod(getter, ctx, name)
	if err != nil {
		return nil, nil, err
	}

	// Try to figure out a container
	// If a container was provided, it must be valid
	container := opts.Container
	if len(container) == 0 {
		switch len(pod.Spec.Containers) {
		case 1:
			container = pod.Spec.Containers[0].Name
		case 0:
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s", name))
		default:
			containerNames := getContainerNames(pod)
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s, choose one of: [%s]", name, containerNames))
		}
	} else {
		if !podHasContainerWithName(pod, container) {
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("container %s is not valid for pod %s", container, name))
		}
	}
	nodeHost := pod.Spec.NodeName
	if len(nodeHost) == 0 {
		// If pod has not been assigned a host, return an empty location
		return nil, nil, nil
	}
	nodeScheme, nodePort, nodeTransport, err := connInfo.GetConnectionInfo(ctx, nodeHost)
	if err != nil {
		return nil, nil, err
	}
	params := url.Values{}
	if opts.Follow {
		params.Add("follow", "true")
	}
	if opts.Previous {
		params.Add("previous", "true")
	}
	if opts.Timestamps {
		params.Add("timestamps", "true")
	}
	if opts.SinceSeconds != nil {
		params.Add("sinceSeconds", strconv.FormatInt(*opts.SinceSeconds, 10))
	}
	if opts.SinceTime != nil {
		params.Add("sinceTime", opts.SinceTime.Format(time.RFC3339))
	}
	if opts.TailLines != nil {
		params.Add("tailLines", strconv.FormatInt(*opts.TailLines, 10))
	}
	if opts.LimitBytes != nil {
		params.Add("limitBytes", strconv.FormatInt(*opts.LimitBytes, 10))
	}
	loc := &url.URL{
		Scheme:   nodeScheme,
		Host:     fmt.Sprintf("%s:%d", nodeHost, nodePort),
		Path:     fmt.Sprintf("/containerLogs/%s/%s/%s", pod.Namespace, pod.Name, container),
		RawQuery: params.Encode(),
	}
	return loc, nodeTransport, nil
}
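
// Example (hypothetical values): for pod "mypod" in namespace "default" with a
// single container "app" on node "node-1", with opts.Follow set, and assuming
// connInfo reports scheme "https" and port 10250 for that node, the location is
//
//	https://node-1:10250/containerLogs/default/mypod/app?follow=true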

// podHasContainerWithName reports whether the pod spec declares a container with the given name.
func podHasContainerWithName(pod *api.Pod, containerName string) bool {
	for _, c := range pod.Spec.Containers {
		if c.Name == containerName {
			return true
		}
	}
	return false
}

// streamParams adds the query parameters for an exec or attach request to params.
func streamParams(params url.Values, opts runtime.Object) error {
	switch opts := opts.(type) {
	case *api.PodExecOptions:
		if opts.Stdin {
			params.Add(api.ExecStdinParam, "1")
		}
		if opts.Stdout {
			params.Add(api.ExecStdoutParam, "1")
		}
		if opts.Stderr {
			params.Add(api.ExecStderrParam, "1")
		}
		if opts.TTY {
			params.Add(api.ExecTTYParam, "1")
		}
		for _, c := range opts.Command {
			params.Add("command", c)
		}
	case *api.PodAttachOptions:
		if opts.Stdin {
			params.Add(api.ExecStdinParam, "1")
		}
		if opts.Stdout {
			params.Add(api.ExecStdoutParam, "1")
		}
		if opts.Stderr {
			params.Add(api.ExecStderrParam, "1")
		}
		if opts.TTY {
			params.Add(api.ExecTTYParam, "1")
		}
	default:
		return fmt.Errorf("unknown object for streaming: %v", opts)
	}
	return nil
}
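
// Example (hypothetical options): for a PodExecOptions with Stdin and TTY set
// and Command = []string{"sh", "-c", "ls"}, params gains a "1"-valued entry
// under each of the api.ExecStdinParam and api.ExecTTYParam keys, plus one
// "command" entry per element, so params.Encode() repeats command three times.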

// AttachLocation returns the attach URL for a pod container. If opts.Container is blank
// and only one container is present in the pod, that container is used.
func AttachLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
	opts *api.PodAttachOptions,
) (*url.URL, http.RoundTripper, error) {
	return streamLocation(getter, connInfo, ctx, name, opts, opts.Container, "attach")
}

// ExecLocation returns the exec URL for a pod container. If opts.Container is blank
// and only one container is present in the pod, that container is used.
func ExecLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
	opts *api.PodExecOptions,
) (*url.URL, http.RoundTripper, error) {
	return streamLocation(getter, connInfo, ctx, name, opts, opts.Container, "exec")
}

// streamLocation resolves the URL on the pod's node for an exec or attach style
// request (path selects the kubelet endpoint), validating the container choice.
func streamLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
	opts runtime.Object,
	container,
	path string,
) (*url.URL, http.RoundTripper, error) {
	pod, err := getPod(getter, ctx, name)
	if err != nil {
		return nil, nil, err
	}

	// Try to figure out a container
	// If a container was provided, it must be valid
	if container == "" {
		switch len(pod.Spec.Containers) {
		case 1:
			container = pod.Spec.Containers[0].Name
		case 0:
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s", name))
		default:
			containerNames := getContainerNames(pod)
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s, choose one of: [%s]", name, containerNames))
		}
	} else {
		if !podHasContainerWithName(pod, container) {
			return nil, nil, errors.NewBadRequest(fmt.Sprintf("container %s is not valid for pod %s", container, name))
		}
	}
	nodeHost := pod.Spec.NodeName
	if len(nodeHost) == 0 {
		// If the pod has not been assigned a host, there is no node to connect to; reject the request.
		return nil, nil, errors.NewBadRequest(fmt.Sprintf("pod %s does not have a host assigned", name))
	}
	nodeScheme, nodePort, nodeTransport, err := connInfo.GetConnectionInfo(ctx, nodeHost)
	if err != nil {
		return nil, nil, err
	}
	params := url.Values{}
	if err := streamParams(params, opts); err != nil {
		return nil, nil, err
	}
	loc := &url.URL{
		Scheme:   nodeScheme,
		Host:     fmt.Sprintf("%s:%d", nodeHost, nodePort),
		Path:     fmt.Sprintf("/%s/%s/%s/%s", path, pod.Namespace, pod.Name, container),
		RawQuery: params.Encode(),
	}
	return loc, nodeTransport, nil
}
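
// Example (hypothetical values): an exec into container "app" of pod "mypod"
// in namespace "default", with the pod's node reported as https on port 10250,
// yields a location of the form
//
//	https://node-1:10250/exec/default/mypod/app?command=sh&...
//
// where the query string is whatever streamParams encoded from the options.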

// PortForwardLocation returns the port-forward URL for a pod.
func PortForwardLocation(
	getter ResourceGetter,
	connInfo client.ConnectionInfoGetter,
	ctx api.Context,
	name string,
) (*url.URL, http.RoundTripper, error) {
	pod, err := getPod(getter, ctx, name)
	if err != nil {
		return nil, nil, err
	}

	nodeHost := pod.Spec.NodeName
	if len(nodeHost) == 0 {
		// If the pod has not been assigned a host, there is no node to forward to; reject the request.
		return nil, nil, errors.NewBadRequest(fmt.Sprintf("pod %s does not have a host assigned", name))
	}
	nodeScheme, nodePort, nodeTransport, err := connInfo.GetConnectionInfo(ctx, nodeHost)
	if err != nil {
		return nil, nil, err
	}
	loc := &url.URL{
		Scheme: nodeScheme,
		Host:   fmt.Sprintf("%s:%d", nodeHost, nodePort),
		Path:   fmt.Sprintf("/portForward/%s/%s", pod.Namespace, pod.Name),
	}
	return loc, nodeTransport, nil
}